Spaces:
Sleeping
Sleeping
app push
Browse files- README.md +5 -5
- app.py +334 -0
- data_generator.py +172 -0
- gitattributes +35 -0
- rct_analyzer.py +69 -0
- rct_simulator.py +138 -0
- requirements.txt +9 -0
README.md
CHANGED
@@ -1,10 +1,10 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 4.
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
---
|
|
|
1 |
---
|
2 |
+
title: Example2
|
3 |
+
emoji: 🐨
|
4 |
+
colorFrom: blue
|
5 |
+
colorTo: yellow
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.39.0
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
---
|
app.py
ADDED
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import pandas as pd
|
3 |
+
import matplotlib.pyplot as plt
|
4 |
+
import seaborn as sns
|
5 |
+
import numpy as np
|
6 |
+
import plotly
|
7 |
+
import plotly.graph_objs as go
|
8 |
+
from plotly.subplots import make_subplots
|
9 |
+
from sklearn.preprocessing import StandardScaler
|
10 |
+
from causalml.inference.meta import BaseTClassifier
|
11 |
+
from sklearn.ensemble import RandomForestClassifier
|
12 |
+
from data_generator import generate_synthetic_data
|
13 |
+
from rct_simulator import run_rct_simulation
|
14 |
+
from rct_analyzer import analyze_rct_results
|
15 |
+
|
16 |
+
# Global variables to store generated data and RCT results
|
17 |
+
generated_data = None
|
18 |
+
rct_results = None
|
19 |
+
|
20 |
+
def perform_eda(discount_level):
    """Segment-level EDA for one discount arm versus Control.

    Merges the generated customer base with the RCT assignments and
    transactions, then breaks incremental purchases/profit down by
    newsletter subscription and preferred payment method.

    Returns a 5-tuple: (status message, newsletter table, payment table,
    newsletter figure, payment figure).
    """
    global rct_results, generated_data
    if generated_data is None or rct_results is None:
        return ("Please generate customer data and run RCT simulation first.",
                None, None, None, None)

    purchases, assignments = rct_results

    # Attach each customer's variant and (optional) purchase record.
    combined = generated_data.merge(assignments, on='customer_id', how='inner')
    combined = combined.merge(purchases, on=['customer_id', 'variant'], how='left')
    # Customers with no transaction get zero purchase/profit.
    for col in ('purchase', 'profit'):
        combined[col] = combined[col].fillna(0)

    # Restrict to the control group plus the arm under analysis.
    subset = combined[combined['variant'].isin(['Control', discount_level])]

    tables = {}
    figures = {}
    for feature in ('newsletter_subscription', 'preferred_payment_method'):
        tables[feature] = analyze_feature(subset, feature)
        figures[feature] = create_bar_plot(tables[feature], feature, discount_level)

    return (f"EDA completed for {discount_level}",
            tables['newsletter_subscription'], tables['preferred_payment_method'],
            figures['newsletter_subscription'], figures['preferred_payment_method'])
48 |
+
|
49 |
+
def analyze_feature(df, feature):
    """Compare treatment vs. control totals per level of *feature*.

    Rows with variant == 'Control' form the control arm; everything else is
    treated as the treatment arm. Returns a DataFrame with per-level purchase
    and profit sums for both arms plus their incremental differences.
    """
    is_control = df['variant'] == 'Control'

    def _totals(frame):
        # Sum purchases and profit within each level of the feature.
        return frame.groupby(feature).agg({
            'purchase': 'sum',
            'profit': 'sum'
        }).reset_index()

    summary = pd.merge(_totals(df[is_control]), _totals(df[~is_control]),
                       on=feature, suffixes=('_control', '_treatment'))
    summary['incremental_purchases'] = summary['purchase_treatment'] - summary['purchase_control']
    summary['incremental_profit'] = summary['profit_treatment'] - summary['profit_control']

    return summary
68 |
+
|
69 |
+
def create_bar_plot(data, feature, discount_level):
    """Plot incremental purchases and incremental profit per level of *feature*.

    Parameters:
        data: DataFrame produced by analyze_feature (must contain *feature*,
            'incremental_purchases' and 'incremental_profit' columns).
        feature: column to use for the x axis.
        discount_level: label of the treatment arm, used in the titles only.

    Returns:
        The matplotlib Figure with the two side-by-side bar charts.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

    # Work on a copy: the original assigned the astype(str) result back into
    # the caller's DataFrame, silently mutating it. bar() needs string
    # categories on the x axis, so convert on the copy instead.
    data = data.copy()
    data[feature] = data[feature].astype(str)

    ax1.bar(data[feature], data['incremental_purchases'])
    ax1.set_title(f'Incremental Purchases by {feature}\n({discount_level})', fontsize=14)
    ax1.set_xlabel(feature)
    ax1.set_ylabel('Incremental Purchases')
    ax1.tick_params(axis='x', rotation=45)

    ax2.bar(data[feature], data['incremental_profit'])
    ax2.set_title(f'Incremental Profit by {feature}\n({discount_level})', fontsize=14)
    ax2.set_xlabel(feature)
    ax2.set_ylabel('Incremental Profit')
    ax2.tick_params(axis='x', rotation=45)

    plt.tight_layout()
    return fig
88 |
+
|
89 |
+
|
90 |
+
def generate_and_display_data(num_customers):
    """Generate the synthetic customer base and return preview samples.

    Stores the full dataset in the module-level `generated_data` so the other
    tabs can use it, and returns two 10-row samples (basic and extra customer
    attributes) plus a status message for the UI.
    """
    global generated_data
    generated_data = generate_synthetic_data(num_customers=num_customers)

    basic_columns = ['customer_id', 'name', 'email', 'age', 'gender', 'region', 'city',
                     'registration_date', 'phone_number', 'preferred_language',
                     'newsletter_subscription', 'preferred_payment_method']
    extra_columns = ['customer_id', 'loyalty_level', 'main_browsing_device',
                     'product_categories_of_interest', 'average_order_value',
                     'total_orders', 'last_order_date']

    def _preview(columns):
        # Random 10-row sample (or fewer if the dataset is smaller).
        frame = generated_data[columns]
        return frame.sample(n=min(10, len(frame)))

    return (_preview(basic_columns), _preview(extra_columns),
            f"Generated {num_customers} records. Displaying samples of 10 rows for each dataset.")
107 |
+
|
108 |
+
def run_and_display_rct(experiment_duration):
    """Run the RCT simulation over the generated customers.

    Stores (transactions, assignments) in the module-level `rct_results`
    for the downstream tabs, and returns 10-row previews of both frames
    plus a status message.
    """
    global generated_data, rct_results
    if generated_data is None:
        return None, None, "Please generate customer data first."

    transactions, assignments = run_rct_simulation(generated_data, experiment_duration)
    # Keep both frames around for the analysis / EDA / modelling tabs.
    rct_results = (transactions, assignments)

    preview_assignments = assignments.sample(n=min(10, len(assignments)))
    preview_transactions = transactions.sample(n=min(10, len(transactions)))

    status = f"Ran RCT simulation for {experiment_duration} days. Displaying samples of 10 rows for each dataset."
    return preview_assignments, preview_transactions, status
121 |
+
|
122 |
+
def analyze_and_display_results():
    """Analyze the stored RCT results and return tables + plot for the UI.

    Returns (overall metrics table, per-variant table, figure, status message);
    all data outputs are None when the simulation has not been run yet.
    """
    global rct_results
    if rct_results is None:
        return None, None, None, "Please run the RCT simulation first."

    # rct_results is (transactions_df, variant_assignments_df).
    overall_df, variant_df, fig = analyze_rct_results(*rct_results)
    return overall_df, variant_df, fig, "Analysis complete. Displaying results and visualizations."
130 |
+
|
131 |
+
def build_uplift_model(data, features, treatment, control):
    """Fit a T-learner uplift model for one treatment arm vs. control.

    Returns a tuple (uplift_scores, feature_importance_df):
    - uplift_scores: one predicted uplift value per row of *data* (all
      customers, not just the two arms used for fitting).
    - feature_importance_df: features sorted by importance, taken from a
      plain RandomForest fit on the purchase outcome.

    NOTE(review): neither forest sets random_state, so importances and
    scores vary between runs — confirm whether determinism is wanted.
    """
    # Prepare the data: restrict fitting to the two arms of interest.
    treatment_data = data[data['variant'] == treatment]
    control_data = data[data['variant'] == control]
    combined_data = pd.concat([treatment_data, control_data])

    # Create dummy variables for categorical features.
    # X is built from the FULL data frame (not combined_data) so that uplift
    # can later be predicted for every customer.
    categorical_features = [f for f in features if data[f].dtype == 'object']
    X = pd.get_dummies(data[features], columns=categorical_features)

    # Standardize numerical features (in place on X, across all rows).
    numerical_features = [f for f in features if data[f].dtype in ['int64', 'float64']]
    scaler = StandardScaler()
    X[numerical_features] = scaler.fit_transform(X[numerical_features])

    # Outcome and treatment indicator for the fitting subset only.
    y = combined_data['purchase']
    t = (combined_data['variant'] == treatment).astype(int)

    # Fit a RandomForestClassifier directly on the purchase outcome.
    # NOTE(review): the importances below therefore describe purchase
    # prediction, not uplift per se — confirm this is the intended display.
    rf_model = RandomForestClassifier(n_estimators=50, max_depth=4)
    rf_model.fit(X.loc[combined_data.index], y)

    # Get feature importances from the RandomForestClassifier.
    feature_importances = rf_model.feature_importances_

    # Create a dataframe with feature names and their importances,
    # most important first.
    feature_importance_df = pd.DataFrame({
        'feature': X.columns,
        'importance': feature_importances
    }).sort_values('importance', ascending=False)

    # Create and fit the causalml BaseTClassifier (T-learner) on the two arms.
    model = BaseTClassifier(RandomForestClassifier(n_estimators=50, max_depth=4))
    model.fit(X=X.loc[combined_data.index].values, treatment=t, y=y)

    # Predict uplift for ALL customers in `data`.
    uplift_scores = model.predict(X.values)

    # Handle 2D output if necessary.
    # NOTE(review): presumably causalml returns (n, 1) for a single treatment
    # or one column per treatment level — verify against the installed
    # causalml version's predict() contract.
    if uplift_scores.ndim == 2:
        if uplift_scores.shape[1] == 2:
            uplift_scores = uplift_scores[:, 1] - uplift_scores[:, 0]
        elif uplift_scores.shape[1] == 1:
            uplift_scores = uplift_scores.flatten()

    return uplift_scores, feature_importance_df
178 |
+
|
179 |
+
def build_model_and_display(selected_features, treatment):
    """Build the uplift model from the stored RCT data and summarize it.

    Parameters:
        selected_features: list of customer columns to use as model inputs.
        treatment: label of the discount arm to compare against 'Control'.

    Returns:
        (info message, uplift-score statistics DataFrame, feature-importance
        Figure); the latter two are None when prerequisites are missing.
    """
    global rct_results, generated_data
    if rct_results is None or generated_data is None:
        return "Please generate customer data and run RCT simulation first.", None, None

    transactions_df, variant_assignments_df = rct_results

    # Prepare the data. Work on a copy of the transactions: the original
    # wrote the `purchase` flag straight into the DataFrame stored inside
    # `rct_results`, mutating shared state on every click.
    transactions_df = transactions_df.copy()
    transactions_df['purchase'] = 1

    df_with_variant = pd.merge(generated_data, variant_assignments_df, on='customer_id', how='inner')
    final_df = pd.merge(df_with_variant, transactions_df, on=['customer_id', 'variant'], how='left')
    # Customers without a transaction get zeros for all monetary columns.
    columns_to_fill = ['purchase', 'price', 'discounted_price', 'cost', 'profit']
    final_df[columns_to_fill] = final_df[columns_to_fill].fillna(0)

    # Build the model
    uplift_scores, feature_importance_df = build_uplift_model(final_df, selected_features, treatment, 'Control')

    # Summary statistics of the per-customer uplift scores.
    stats = pd.DataFrame({
        'Metric': ['Mean', 'Std', 'Min', 'Max'],
        'Value': [
            np.mean(uplift_scores),
            np.std(uplift_scores),
            np.min(uplift_scores),
            np.max(uplift_scores)
        ]
    })

    # Create feature importance plot (top 10 features only).
    fig, ax = plt.subplots(figsize=(10, 6))
    sns.barplot(x='importance', y='feature', data=feature_importance_df.head(10), ax=ax)
    ax.set_title(f'Top 10 Feature Importance for {treatment} vs Control')
    ax.set_xlabel('Importance')
    ax.set_ylabel('Feature')
    plt.tight_layout()

    info = f"Uplift model built using {len(selected_features)} features.\n"
    info += f"Treatment: {treatment} vs Control\n"
    info += f"Number of samples: {len(uplift_scores)}"

    return info, stats, fig
220 |
+
|
221 |
+
# Gradio UI: five tabs walking through the full causal-analysis workflow —
# generate data -> run RCT -> analyze -> EDA -> uplift model. State is shared
# between tabs via the module-level `generated_data` / `rct_results` globals.
with gr.Blocks() as demo:
    gr.Markdown("# Causal AI - Synthetic Customer Data Generator and RCT Simulator")

    # Tab 1: create the synthetic customer base.
    with gr.Tab("Generate Customer Data"):
        gr.Markdown("# Generate Synthetic Customers data")
        gr.Markdown("In this section we generate typical data of customers that are registered to our store.")
        gr.Markdown("First we generate some basic attributes that are defined when the customer first registers, such as Name, City or Preferred Language.")
        gr.Markdown("Then we add some extra information that is usually the result of the customer past behavior, such as Loyalty Level, Past Purchases or Categories of interest.")
        gr.Markdown("## Select the number of customers that you want to Generate")
        num_customers_input = gr.Slider(minimum=10000, maximum=500000, value=50000, step=1000, label="Number of Customer Records")
        generate_btn = gr.Button("Generate Customer Data")
        gr.Markdown("## Basic Customer Info Sample")
        basic_info_output = gr.DataFrame()
        gr.Markdown("## Extra Customer Info Sample")
        extra_info_output = gr.DataFrame()
        generate_info = gr.Textbox(label="Generation Info")

        generate_btn.click(fn=generate_and_display_data,
                           inputs=num_customers_input,
                           outputs=[basic_info_output, extra_info_output, generate_info])

    # Tab 2: randomize customers into arms and simulate purchases.
    with gr.Tab("Run RCT Simulation"):
        gr.Markdown("# Run a Randomized Control Experiment for data collection and analysis")
        gr.Markdown("In this section we simulate running an Experiment where we offer customers different levels of discounts in the Electronics department.")
        gr.Markdown("We randomly split the customers in 4 groups: Control, 5% discount, 10% discount and 15% discount")
        gr.Markdown("During the experiment runtime we record all the purchases made by the customers. We can decide how long to run the experiment for, where longer periods lead to less noise and more significance in the results.")
        experiment_duration_input = gr.Slider(minimum=10, maximum=60, value=30, step=1, label="Experiment Duration (days)")
        rct_btn = gr.Button("Run RCT Simulation")
        gr.Markdown("## Customer assigment to experiment group:")
        assignments_output = gr.DataFrame()
        gr.Markdown("## Purchases made during experiment runtime:")
        transactions_output = gr.DataFrame()
        rct_info = gr.Textbox(label="RCT Simulation Info")
        rct_btn.click(fn=run_and_display_rct,
                      inputs=experiment_duration_input,
                      outputs=[assignments_output, transactions_output, rct_info])

    # Tab 3: overall and per-variant incremental metrics.
    with gr.Tab("Analyze RCT Results"):
        gr.Markdown("# Experiment Analysis")
        gr.Markdown("In this section we analyze the experiment results. We measure, per each discount value (5%, 10%, 15%) what is the incremental number of Purchases and the incremental Profit compared to the Control group.")
        analyze_btn = gr.Button("Analyze RCT Results")
        gr.Markdown("## Overall metrics")
        overall_metrics_output = gr.DataFrame()
        gr.Markdown("## Metrics by Variant")
        variant_metrics_output = gr.DataFrame()
        gr.Markdown("## Metrics per Variant visualization")
        gr.Markdown("## To-Do: Add confidence intervals")
        plot_output = gr.Plot()
        analysis_info = gr.Textbox(label="Analysis Info")

        analyze_btn.click(fn=analyze_and_display_results,
                          inputs=[],
                          outputs=[overall_metrics_output, variant_metrics_output, plot_output, analysis_info])

    # Tab 4: segment-level breakdowns for a chosen discount arm.
    with gr.Tab("Exploratory Data Analysis"):
        gr.Markdown("# Exploratory Data Analysis")
        gr.Markdown("In this section, we explore the impact of discounts on different customer segments.")

        discount_dropdown = gr.Dropdown(
            choices=['5% discount', '10% discount', '15% discount'],
            label="Select discount level to analyze",
            value='10% discount'
        )

        eda_btn = gr.Button("Perform EDA")

        eda_info = gr.Textbox(label="EDA Information")

        gr.Markdown("## Newsletter Subscription Analysis")
        newsletter_results = gr.DataFrame(label="Newsletter Subscription Results")
        newsletter_plot = gr.Plot(label="Newsletter Subscription Plot")

        gr.Markdown("## Preferred Payment Method Analysis")
        payment_results = gr.DataFrame(label="Preferred Payment Method Results")
        payment_plot = gr.Plot(label="Preferred Payment Method Plot")

        eda_btn.click(
            fn=perform_eda,
            inputs=[discount_dropdown],
            outputs=[eda_info, newsletter_results, payment_results, newsletter_plot, payment_plot]
        )

    # Tab 5: T-learner uplift model over user-selected features.
    with gr.Tab("Build Uplift Model"):
        gr.Markdown("## Build Uplift Model")

        # Feature selection
        feature_checklist = gr.CheckboxGroup(
            choices=['age', 'gender', 'region', 'preferred_language', 'newsletter_subscription',
                     'preferred_payment_method', 'loyalty_level', 'main_browsing_device',
                     'average_order_value', 'total_orders'],
            label="Select features for the model",
            value=['age', 'gender', 'loyalty_level', 'average_order_value', 'total_orders']
        )

        # Dropdown for selecting treatment
        treatment_dropdown = gr.Dropdown(
            choices=['5% discount', '10% discount', '15% discount'],
            label="Select treatment",
            value='10% discount'
        )

        build_model_btn = gr.Button("Build Uplift Model")

        model_info = gr.Textbox(label="Model Information")
        uplift_stats = gr.Dataframe(label="Uplift Score Statistics")
        feature_importance_plot = gr.Plot(label="Feature Importance")

        build_model_btn.click(
            fn=build_model_and_display,
            inputs=[feature_checklist, treatment_dropdown],
            outputs=[model_info, uplift_stats, feature_importance_plot]
        )

demo.launch()
data_generator.py
ADDED
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import numpy as np
|
3 |
+
from faker import Faker
|
4 |
+
from datetime import datetime, timedelta, date
|
5 |
+
import random
|
6 |
+
|
7 |
+
def generate_synthetic_data(num_customers=1000):
    """Generate a synthetic customer base for a Ukrainian e-commerce store.

    Ages, genders, languages, order behavior and device/payment preferences
    are correlated with per-region characteristics so downstream analyses
    have realistic structure.

    Parameters:
        num_customers: number of customer records to create.

    Returns:
        pd.DataFrame with one row per customer (demographics, contact info
        and behavioral attributes).
    """
    # Set up Faker for Ukrainian locale. Seed every RNG in use so repeated
    # calls are reproducible (the original seeded Faker and numpy but not the
    # `random` module, so phone numbers and email domains varied between runs).
    fake = Faker('uk_UA')
    Faker.seed(42)
    np.random.seed(42)
    random.seed(42)

    # Define constants
    NUM_CUSTOMERS = num_customers
    START_DATE = date(2019, 1, 1)
    END_DATE = date(2024, 7, 31)

    # Helper functions
    def generate_phone_number():
        # +380, a mobile operator code (50-99), then the tail of a fake MSISDN.
        return f"+380{random.randint(50, 99)}{fake.msisdn()[6:]}"

    def generate_email(name):
        # Derive a plausible address from the customer's own name.
        username = name.lower().replace(' ', '.').replace('\'', '')
        domain = random.choice(['gmail.com', 'ukr.net', 'i.ua', 'meta.ua', 'yahoo.com'])
        return f"{username}@{domain}"

    # Regions with rough demographic characteristics used to skew the draws.
    REGIONS = {
        'Київська': {'avg_age': 40, 'urbanization': 0.8, 'tech_adoption': 0.7},
        'Львівська': {'avg_age': 38, 'urbanization': 0.7, 'tech_adoption': 0.6},
        'Харківська': {'avg_age': 42, 'urbanization': 0.8, 'tech_adoption': 0.65},
        'Одеська': {'avg_age': 41, 'urbanization': 0.7, 'tech_adoption': 0.6},
        'Дніпропетровська': {'avg_age': 43, 'urbanization': 0.75, 'tech_adoption': 0.6},
        'Запорізька': {'avg_age': 44, 'urbanization': 0.7, 'tech_adoption': 0.55},
        'Вінницька': {'avg_age': 42, 'urbanization': 0.6, 'tech_adoption': 0.5},
        'Полтавська': {'avg_age': 43, 'urbanization': 0.65, 'tech_adoption': 0.55},
        'Чернігівська': {'avg_age': 45, 'urbanization': 0.6, 'tech_adoption': 0.5},
        'Сумська': {'avg_age': 44, 'urbanization': 0.65, 'tech_adoption': 0.5}
    }

    # Generate initial customer data
    data = []
    for i in range(NUM_CUSTOMERS):
        customer_id = f"C{str(i+1).zfill(6)}"

        # Draw the name once so the email matches it. (The original called
        # fake.name() a second time when building the email, so the address
        # never corresponded to the customer's recorded name.)
        name = fake.name()

        # Region and City
        region = np.random.choice(list(REGIONS.keys()))
        region_info = REGIONS[region]
        is_urban = np.random.random() < region_info['urbanization']
        city = fake.city()
        if not is_urban:
            city = f"смт {city}"

        # Age (dependent on region)
        age = int(np.random.normal(region_info['avg_age'], 10))
        age = max(18, min(80, age))  # Clamp between 18 and 80

        # Gender (slight dependency on age and region)
        gender_prob = 0.49 + 0.02 * (age - 40) / 40  # Slight increase in male probability with age
        gender_prob += 0.02 * (region_info['urbanization'] - 0.7) / 0.3  # Slight increase in urban areas
        gender = np.random.choice(['Male', 'Female', 'Other'], p=[gender_prob, 1-gender_prob-0.01, 0.01])

        # Preferred Language (dependent on age and region)
        ukrainian_prob = 0.8 - 0.2 * (age - 40) / 40  # Younger people more likely to prefer Ukrainian
        ukrainian_prob += 0.1 * (1 - region_info['urbanization'])  # Rural areas more likely to prefer Ukrainian
        preferred_language = np.random.choice(['Ukrainian', 'Russian'], p=[min(1, max(0, ukrainian_prob)), 1-min(1, max(0, ukrainian_prob))])

        # Registration date
        registration_date = fake.date_between(start_date=START_DATE, end_date=END_DATE)

        # Determine if the customer is active (has made orders)
        is_active = np.random.random() < 0.6  # 60% chance of being an active customer

        if is_active:
            # Total orders and average order value (dependent on various factors)
            base_orders = np.random.poisson(5)
            order_multiplier = 1 + 0.2 * (age - 40) / 40  # Age factor
            order_multiplier *= 1 + 0.1 * (region_info['tech_adoption'] - 0.6) / 0.2  # Tech adoption factor
            order_multiplier *= 1.1 if gender == 'Female' else 0.9  # Gender factor
            order_multiplier *= 1.1 if preferred_language == 'Ukrainian' else 0.9  # Language factor
            total_orders = max(1, int(base_orders * order_multiplier))  # Ensure at least 1 order for active customers

            base_aov = np.random.gamma(shape=5, scale=100)
            aov_multiplier = 1 + 0.3 * (age - 40) / 40  # Age factor
            aov_multiplier *= 1 + 0.2 * (region_info['urbanization'] - 0.7) / 0.3  # Urbanization factor
            aov_multiplier *= 1.1 if gender == 'Male' else 0.9  # Gender factor
            average_order_value = base_aov * aov_multiplier

            # Last order date
            last_order_date = fake.date_between(start_date=registration_date, end_date=END_DATE)
        else:
            total_orders = 0
            average_order_value = 0
            last_order_date = None

        # Loyalty level: one level per ~2 orders, clamped to 1..5. Fixes the
        # original precedence bug `int(total_orders+1 / 2)`, which evaluated
        # as `total_orders + 0.5` and effectively clamped total_orders itself.
        loyalty_level = min(5, max(1, int((total_orders + 1) / 2)))

        # Newsletter subscription (dependent on age, loyalty, and tech adoption)
        newsletter_prob = 0.5 + 0.1 * loyalty_level / 5 - 0.2 * (age - 40) / 40 + 0.2 * region_info['tech_adoption']
        newsletter_subscription = np.random.random() < newsletter_prob

        # Preferred payment method (dependent on age and urbanization)
        payment_probs = [
            0.5 - 0.2 * (age - 40) / 40 + 0.2 * region_info['urbanization'],  # Credit Card
            0.3 + 0.2 * (age - 40) / 40 - 0.2 * region_info['urbanization'],  # Cash on Delivery
            0.15,  # Bank Transfer
            0.05 + 0.1 * region_info['tech_adoption']  # PayPal
        ]
        payment_probs = [max(0, min(p, 1)) for p in payment_probs]
        payment_probs = [p / sum(payment_probs) for p in payment_probs]
        preferred_payment_method = np.random.choice(
            ['Credit Card', 'Cash on Delivery', 'Bank Transfer', 'PayPal'],
            p=payment_probs
        )

        # Main browsing device (dependent on age and tech adoption)
        device_probs = [
            0.4 + 0.3 * (age - 40) / 40 - 0.2 * region_info['tech_adoption'],  # Web
            0.4 - 0.2 * (age - 40) / 40 + 0.1 * region_info['tech_adoption'],  # Mobile
            0.2 - 0.1 * (age - 40) / 40 + 0.1 * region_info['tech_adoption']  # App
        ]
        device_probs = [max(0, min(p, 1)) for p in device_probs]
        device_probs = [p / sum(device_probs) for p in device_probs]
        main_browsing_device = np.random.choice(['Web', 'Mobile', 'App'], p=device_probs)

        # Product categories (dependent on age, gender, and browsing device)
        all_categories = ['Electronics', 'Home Appliances', 'Computers', 'Smartphones', 'TV & Audio']
        category_probs = [0.2] * 5
        if age < 30:
            category_probs[2] += 0.1  # Increase Computers
            category_probs[3] += 0.1  # Increase Smartphones
        elif age > 60:
            category_probs[1] += 0.1  # Increase Home Appliances
            category_probs[4] += 0.1  # Increase TV & Audio
        if gender == 'Male':
            category_probs[0] += 0.05  # Slight increase in Electronics
            category_probs[2] += 0.05  # Slight increase in Computers
        if main_browsing_device == 'Mobile':
            category_probs[3] += 0.1  # Increase Smartphones
        category_probs = [p / sum(category_probs) for p in category_probs]
        num_categories = np.random.randint(1, 4)
        product_categories = np.random.choice(all_categories, size=num_categories, replace=False, p=category_probs)

        data.append({
            'customer_id': customer_id,
            'name': name,
            'email': generate_email(name),
            'age': age,
            'gender': gender,
            'region': region,
            'city': city,
            'registration_date': registration_date,
            'phone_number': generate_phone_number(),
            'preferred_language': preferred_language,
            'newsletter_subscription': newsletter_subscription,
            'preferred_payment_method': preferred_payment_method,
            'loyalty_level': loyalty_level,
            'main_browsing_device': main_browsing_device,
            'product_categories_of_interest': ', '.join(product_categories),
            'average_order_value': round(average_order_value, 2),
            'total_orders': total_orders,
            'last_order_date': last_order_date
        })

    # Create DataFrame
    df = pd.DataFrame(data)
    return df
169 |
+
|
170 |
+
if __name__ == "__main__":
    # Quick smoke check: generate a default-sized dataset and preview it.
    df = generate_synthetic_data()
    print(df.head())
gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
rct_analyzer.py
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import matplotlib.pyplot as plt
|
3 |
+
|
4 |
+
def calculate_metrics(df):
    """Aggregate purchase-level metrics for one slice of the transactions.

    Parameters:
        df: transactions DataFrame with 'customer_id', 'discounted_price'
            and 'profit' columns (one row per purchase).

    Returns:
        pd.Series with totals, conversion rate and average order value.
        Note: 'Conversion Rate' here is purchases per converted customer
        (the frame only contains customers who purchased).
    """
    total_customers = len(df['customer_id'].unique())
    total_purchases = len(df)
    total_revenue = df['discounted_price'].sum()
    total_profit = df['profit'].sum()
    # Guard both ratios against an empty slice: the original guarded the
    # average order value but divided by total_customers unconditionally,
    # raising ZeroDivisionError when no transactions were recorded.
    conversion_rate = total_purchases / total_customers if total_customers > 0 else 0
    average_order_value = total_revenue / total_purchases if total_purchases > 0 else 0

    return pd.Series({
        'Total Converted Customers': total_customers,
        'Total Purchases': total_purchases,
        'Total Revenue': total_revenue,
        'Total Profit': total_profit,
        'Conversion Rate': conversion_rate,
        'Average Order Value': average_order_value
    })
20 |
+
|
21 |
+
def analyze_rct_results(transactions_df, variant_assignments_df):
    """Summarize RCT transactions overall and per variant, with a plot.

    Parameters
    ----------
    transactions_df : pd.DataFrame
        One row per purchase, with 'variant', 'customer_id',
        'discounted_price' and 'profit' columns.
    variant_assignments_df : pd.DataFrame
        Customer-to-variant assignments. NOTE(review): currently unused —
        per-variant metrics are computed from converted customers only;
        confirm whether true conversion rates should use this table.

    Returns
    -------
    (overall_df, variant_metrics, fig)
        One-row overall metrics table, per-variant metrics table with
        incremental columns vs. the Control arm, and a matplotlib figure.
    """
    overall_metrics = calculate_metrics(transactions_df)
    variant_metrics = transactions_df.groupby('variant').apply(calculate_metrics).reset_index()

    # Incremental metrics relative to the Control arm.
    # (An IndexError here means there were no Control transactions.)
    control_metrics = variant_metrics[variant_metrics['variant'] == 'Control'].iloc[0]
    variant_metrics['Incremental Purchases'] = variant_metrics['Total Purchases'] - control_metrics['Total Purchases']
    variant_metrics['Incremental Profit'] = variant_metrics['Total Profit'] - control_metrics['Total Profit']
    # For the Control row this is 0/0 -> NaN (pandas division does not raise).
    variant_metrics['Profit per Incremental Purchase'] = variant_metrics['Incremental Profit'] / variant_metrics['Incremental Purchases']

    # Overall metrics as a one-row table.
    overall_df = pd.DataFrame([overall_metrics])

    # Fix the display/sort order of the variants.
    variant_order = ['Control', '5% discount', '10% discount', '15% discount']
    variant_metrics['variant'] = pd.Categorical(variant_metrics['variant'], categories=variant_order, ordered=True)
    variant_metrics = variant_metrics.sort_values('variant')

    # Plot incremental profit per incremental purchase vs. incremental
    # purchases for the treatment arms only (Control is the baseline).
    fig, ax2 = plt.subplots(1, 1, figsize=(10, 6))
    non_control = variant_metrics[variant_metrics['variant'] != 'Control']
    ax2.scatter(non_control['Incremental Purchases'], non_control['Profit per Incremental Purchase'])
    for _, row in non_control.iterrows():
        ax2.annotate(row['variant'], (row['Incremental Purchases'], row['Profit per Incremental Purchase']))
    ax2.set_xlabel('Incremental Total Purchases')
    ax2.set_ylabel('Incremental Total Profit per Incremental Purchase')
    ax2.set_title('Incremental Profit per Purchase vs Incremental Purchases')
    ax2.axhline(y=0, color='r', linestyle='--')
    ax2.axvline(x=0, color='r', linestyle='--')
    ax2.grid(True, linestyle=':', alpha=0.7)

    plt.tight_layout()
    return overall_df, variant_metrics, fig
|
rct_simulator.py
ADDED
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import numpy as np
|
3 |
+
from datetime import datetime, timedelta
|
4 |
+
import random
|
5 |
+
|
6 |
+
# Catalog of Electronics products used by the purchase simulation.
# "cost" is the unit cost to the business; "price" is the undiscounted
# list price (roughly a 27-29% margin on list price across items).
electronics_products = [
    {"name": "4K Smart TV", "cost": 500, "price": 699},
    {"name": "Wireless Headphones", "cost": 100, "price": 139},
    {"name": "Gaming Console", "cost": 300, "price": 419},
    {"name": "Digital Camera", "cost": 400, "price": 559},
    {"name": "Bluetooth Speaker", "cost": 50, "price": 69},
    {"name": "Smartwatch", "cost": 150, "price": 209},
    {"name": "Laptop", "cost": 600, "price": 839},
    {"name": "Tablet", "cost": 200, "price": 279},
    {"name": "Drone", "cost": 250, "price": 349},
    {"name": "Home Theater System", "cost": 350, "price": 489},
    {"name": "E-reader", "cost": 80, "price": 109},
    {"name": "Portable Power Bank", "cost": 30, "price": 41},
    {"name": "Wireless Earbuds", "cost": 80, "price": 109},
    {"name": "Action Camera", "cost": 150, "price": 209},
    {"name": "Smart Home Hub", "cost": 70, "price": 97},
    {"name": "Gaming Mouse", "cost": 40, "price": 55},
    {"name": "External Hard Drive", "cost": 60, "price": 83},
    {"name": "Graphic Tablet", "cost": 180, "price": 249},
    {"name": "Noise-Canceling Headphones", "cost": 200, "price": 279},
    {"name": "Portable Projector", "cost": 300, "price": 419}
]
|
29 |
+
|
30 |
+
# RCT variants and their discount rates. The two lists are parallel:
# variants[i] corresponds to discount_rates[i] (index 0 = Control, no discount).
variants = ['Control', '5% discount', '10% discount', '15% discount']
discount_rates = [0, 0.05, 0.10, 0.15]
|
33 |
+
|
34 |
+
# Function to calculate purchase probability with increased feature dependency
|
35 |
+
def calculate_purchase_probability(customer, discount, base_prob=0.1):
|
36 |
+
prob = base_prob
|
37 |
+
|
38 |
+
# Age factor (younger customers more sensitive to discounts)
|
39 |
+
age_factor = (60 - customer['age']) / 60
|
40 |
+
prob += 0.02 * age_factor
|
41 |
+
|
42 |
+
# Loyalty factor (more loyal customers less sensitive to discounts)
|
43 |
+
loyalty_factor = (6 - customer['loyalty_level']) / 5
|
44 |
+
prob += 0.02 * loyalty_factor
|
45 |
+
|
46 |
+
# Past behavior factor (customers with more orders more likely to buy, but less sensitive to discounts)
|
47 |
+
order_factor = min(customer['total_orders'] / 20, 1)
|
48 |
+
prob += 0.03 * order_factor
|
49 |
+
|
50 |
+
# Newsletter subscription factor (subscribed customers more sensitive to discounts)
|
51 |
+
if customer['newsletter_subscription']:
|
52 |
+
prob += 0.03
|
53 |
+
|
54 |
+
# Browsing device factor (mobile and app users more sensitive to discounts)
|
55 |
+
if customer['main_browsing_device'] == 'Mobile':
|
56 |
+
prob += 0.02
|
57 |
+
elif customer['main_browsing_device'] == 'App':
|
58 |
+
prob += 0.03
|
59 |
+
|
60 |
+
# Average order value factor (higher AOV customers less sensitive to discounts)
|
61 |
+
aov_factor = min(customer['average_order_value'] / 1000, 1)
|
62 |
+
prob -= 0.02 * aov_factor
|
63 |
+
|
64 |
+
# Gender factor (assume slightly different sensitivity to discounts)
|
65 |
+
if customer['gender'] == 'Female':
|
66 |
+
prob += 0.01
|
67 |
+
elif customer['gender'] == 'Male':
|
68 |
+
prob -= 0.01
|
69 |
+
|
70 |
+
# Preferred payment method factor
|
71 |
+
if customer['preferred_payment_method'] == 'Credit Card':
|
72 |
+
prob += 0.02 # Credit card users might be more likely to make impulse purchases
|
73 |
+
|
74 |
+
# Adjust probability based on discount with increased sensitivity
|
75 |
+
discount_sensitivity = 1 + age_factor - loyalty_factor + (0.5 if customer['newsletter_subscription'] else 0)
|
76 |
+
if discount == 0.05:
|
77 |
+
prob *= (1 + discount * 3.5 * discount_sensitivity)
|
78 |
+
elif discount == 0.1:
|
79 |
+
prob *= (1 + discount * 4.5 * discount_sensitivity)
|
80 |
+
elif discount == 0.15:
|
81 |
+
prob *= (1 + discount * 4.3 * discount_sensitivity)
|
82 |
+
|
83 |
+
return min(max(prob, 0), 1) # Ensure probability is between 0 and 1
|
84 |
+
|
85 |
+
# Simulate one purchase opportunity for a customer in a given variant.
def simulate_purchase(customer, variant_index, product):
    """Return a transaction record if the customer buys, else None.

    Draws a single uniform random number and compares it against the
    customer's purchase probability under the variant's discount rate.
    """
    discount = discount_rates[variant_index]
    buys = np.random.random() < calculate_purchase_probability(customer, discount)
    if not buys:
        return None

    final_price = product['price'] * (1 - discount)
    return {
        'customer_id': customer['customer_id'],
        'variant': variants[variant_index],
        'product': product['name'],
        'price': product['price'],
        'discounted_price': final_price,
        'cost': product['cost'],
        'profit': final_price - product['cost'],
    }
|
102 |
+
|
103 |
+
def run_rct_simulation(df, experiment_duration=30):
    """Run the discount RCT over the customers in *df*.

    Each customer is uniformly assigned one of the four variants, then gets
    round(experiment_duration / 10) purchase opportunities on randomly
    chosen products.

    Parameters
    ----------
    df : pd.DataFrame
        Customer rows; must carry the attributes read by
        calculate_purchase_probability plus 'customer_id'.
    experiment_duration : int
        Experiment length in days. NOTE(review): for values < 5 the
        opportunity count rounds to 0, so no purchases are simulated —
        confirm if intended.

    Returns
    -------
    (transactions_df, variant_assignments_df)
        Purchases (one row each, with a constant 'purchase' = 1 column)
        and the full customer-to-variant assignment table.
    """
    # Fixed seeds so repeated runs reproduce the same assignments and draws.
    np.random.seed(42)
    random.seed(42)

    # Experiment window. NOTE(review): start_date/end_date are computed but
    # never used below — purchase dates are not attached to transactions.
    start_date = datetime(2024, 7, 1)
    end_date = start_date + timedelta(days=experiment_duration)

    results = []
    variant_assignments = []  # one record per customer, whether they buy or not

    for _, customer in df.iterrows():
        variant_index = np.random.randint(0, 4)  # uniform over the 4 variants

        # Record variant assignment for all eligible customers
        variant_assignments.append({
            'customer_id': customer['customer_id'],
            'variant': variants[variant_index]
        })

        # Simulate multiple purchase opportunities (1 per ~10 days).
        for _ in range(round(experiment_duration/10)):
            product = random.choice(electronics_products)
            purchase = simulate_purchase(customer, variant_index, product)
            if purchase:
                results.append(purchase)

    # Create DataFrame from results
    transactions_df = pd.DataFrame(results)
    # Every row in transactions_df is a purchase; flag column for counting.
    transactions_df['purchase'] = 1

    # Create DataFrame from variant assignments
    variant_assignments_df = pd.DataFrame(variant_assignments)

    return transactions_df, variant_assignments_df
|
requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
plotly==5.14.1
|
2 |
+
# NOTE: README declares sdk_version 4.39.0; on HF Spaces the README value wins, so this pin conflicts.
gradio==3.50.2
|
3 |
+
pandas==1.5.3
|
4 |
+
numpy==1.24.3
|
5 |
+
faker==18.9.0
|
6 |
+
matplotlib==3.7.1
|
7 |
+
tabulate==0.8.10
|
8 |
+
causalml
|
9 |
+
scikit-learn
|