# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#############################################################################################
#################### Meta MMM Open Source: Robyn 3.11.0 #######################
#################### Quick demo guide #######################
#############################################################################################
# Advanced marketing mix modeling using Meta Open Source project Robyn (Blueprint training)
# https://www.facebookblueprint.com/student/path/253121-marketing-mix-models?utm_source=demo
################################################################
#### Step 0: Setup environment
## Install, load, and check the (latest) Robyn version, using one of these two sources:
## A) Install the latest stable version from CRAN:
# install.packages("Robyn")
## B) Install the latest dev version from GitHub:
# install.packages("remotes") # Install remotes first if you haven't already
# remotes::install_github("facebookexperimental/Robyn/R")
library(Robyn)
# Please check that you have the latest version installed before running this demo; update if not:
# https://github.com/facebookexperimental/Robyn/blob/main/R/DESCRIPTION#L4
packageVersion("Robyn")
# Also, if you're using an older version than the latest dev version, please check the matching demo.R at
# https://github.com/facebookexperimental/Robyn/blob/vX.X.X/demo/demo.R
## Force multi-core use when running RStudio
Sys.setenv(R_FUTURE_FORK_ENABLE = "true")
options(future.fork.enable = TRUE)
# Set to FALSE to avoid the creation of files locally
create_files <- TRUE
## IMPORTANT: You must install and set up the Python library "Nevergrad" once before using Robyn
## Guide: https://github.com/facebookexperimental/Robyn/blob/main/demo/install_nevergrad.R
################################################################
#### Step 1: Load data
## Check simulated dataset or load your own dataset
data("dt_simulated_weekly")
head(dt_simulated_weekly)
## Check holidays from Prophet
# 59 countries included. If your country is not included, please add it manually.
# Tip: any kind of event can be added to this table: school breaks, sporting events, etc.
data("dt_prophet_holidays")
head(dt_prophet_holidays)
# Directory where you want to export results to (will create new folders)
robyn_directory <- "~/Desktop"
################################################################
#### Step 2a: For first time user: Model specification in 4 steps
#### 2a-1: First, specify input variables
## All sign controls are now provided automatically: "positive" for media & organic
## variables and "default" for all others. Users can still customise signs if necessary.
## Documentation is available, access it anytime by running: ?robyn_inputs
InputCollect <- robyn_inputs(
dt_input = dt_simulated_weekly,
dt_holidays = dt_prophet_holidays,
date_var = "DATE", # date format must be "2020-01-01"
dep_var = "revenue", # there should be only one dependent variable
dep_var_type = "revenue", # "revenue" (ROI) or "conversion" (CPA)
prophet_vars = c("trend", "season", "holiday"), # "trend","season", "weekday" & "holiday"
prophet_country = "DE", # input country code. Check: dt_prophet_holidays
context_vars = c("competitor_sales_B", "events"), # e.g. competitors, discount, unemployment etc
paid_media_spends = c("tv_S", "ooh_S", "print_S", "facebook_S", "search_S"), # mandatory input
paid_media_vars = c("tv_S", "ooh_S", "print_S", "facebook_I", "search_clicks_P"), # mandatory.
# paid_media_vars must have same order as paid_media_spends. Use media exposure metrics like
# impressions, GRP etc. If not applicable, use spend instead.
organic_vars = "newsletter", # marketing activity without media spend
# factor_vars = c("events"), # force variables in context_vars or organic_vars to be categorical
window_start = "2016-01-01",
window_end = "2018-12-31",
adstock = "geometric" # geometric, weibull_cdf or weibull_pdf.
)
print(InputCollect)
#### 2a-2: Second, define and add hyperparameters
## The default media variable for modelling has changed from paid_media_vars to paid_media_spends.
## Also, calibration_input channels are required to be spend names.
## Hyperparameter names are based on paid_media_spends names too. See the correct hyperparameter names:
hyper_names(adstock = InputCollect$adstock, all_media = InputCollect$all_media)
## Guide to setup & understand hyperparameters
## Robyn's hyperparameters have four components:
## - Adstock parameters (theta or shape/scale)
## - Saturation parameters (alpha/gamma)
## - Regularisation parameter (lambda). No need to specify manually
## - Time series validation parameter (train_size)
## 1. IMPORTANT: set plot = TRUE to create example plots for adstock & saturation
## hyperparameters and their influence on curve transformations.
plot_adstock(plot = FALSE)
plot_saturation(plot = FALSE)
## 2. Get correct hyperparameter names:
# All variables in paid_media_spends and organic_vars require hyperparameters and will be
# transformed by adstock & saturation.
# Run hyper_names(adstock = InputCollect$adstock, all_media = InputCollect$all_media)
# to get correct media hyperparameter names. All names in hyperparameters must equal
# names from hyper_names(), case sensitive. Run ?hyper_names to check function arguments.
## 3. Hyperparameter interpretation & recommendation:
## Geometric adstock: Theta is the only parameter and means a fixed decay rate. Assuming TV
# spend on day 1 is 100€ and theta = 0.7, then day 2 has 100*0.7=70€ worth of effect
# carried over from day 1, day 3 has 70*0.7=49€ from day 2, etc. Rule of thumb for common
# media genres: TV c(0.3, 0.8), OOH/Print/Radio c(0.1, 0.4), digital c(0, 0.3). Also,
# to convert a weekly theta to its daily equivalent, raise it to the power of (1/7):
# a weekly theta of 0.3 corresponds to a daily theta of 0.3^(1/7) = 0.84. See the sketch below.
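# A minimal illustration (not the package's internal code) of geometric adstock decay
# and the weekly-to-daily theta conversion described above:
geometric_adstock_demo <- function(x, theta) {
  y <- numeric(length(x))
  y[1] <- x[1]
  for (t in seq_along(x)[-1]) y[t] <- x[t] + theta * y[t - 1]
  y
}
geometric_adstock_demo(c(100, 0, 0, 0), theta = 0.7) # 100.0 70.0 49.0 34.3
0.3^(1 / 7) # ~0.84: daily equivalent of a weekly theta of 0.3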
## Weibull CDF adstock: The Cumulative Distribution Function of the Weibull has two parameters,
# shape & scale, and has a flexible decay rate, compared to the fixed decay rate of
# Geometric adstock. The shape parameter controls the shape of the decay curve. The
# recommended bound is c(0, 2). The larger the shape, the more S-shaped the curve; the
# smaller, the more L-shaped. Scale controls the inflexion point of the decay curve. We
# recommend a very conservative bound of c(0, 0.1), because scale greatly increases the
# adstock half-life. When shape or scale is 0, adstock will be 0.
## Weibull PDF adstock: The Probability Density Function of the Weibull also has two
# parameters, shape & scale, and also has a flexible decay rate, like the Weibull CDF. The
# difference is that Weibull PDF offers lagged effect. When shape > 2, the curve peaks
# after x = 0 and has zero slope at x = 0, enabling a lagged effect and sharper increase and
# decrease of adstock, while the scale parameter indicates the limit of the relative
# position of the peak at x axis; when 1 < shape < 2, the curve peaks after x = 0 and has
# infinite positive slope at x = 0, enabling lagged effect and slower increase and decrease
# of adstock, while scale has the same effect as above; when shape = 1, the curve peaks at
# x = 0 and reduces to exponential decay, while scale controls the inflexion point; when
# 0 < shape < 1, the curve peaks at x = 0 and has increasing decay, while scale controls
# the inflexion point. When all possible shapes are relevant, we recommend c(0.0001, 10)
# as bounds for shape; when only strong lagged effect is of interest, we recommend
# c(2.0001, 10) as the bound for shape. In all cases, we recommend a conservative bound of
# c(0, 0.1) for scale. Due to the great flexibility of Weibull PDF, meaning more freedom
# in hyperparameter spaces for Nevergrad to explore, it also requires larger iterations
# to converge. When shape or scale is 0, adstock will be 0.
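# A rough sketch of the two Weibull decays using base R's pweibull()/dweibull().
# Assumption for illustration only: Robyn normalizes the time axis internally, so
# multiplying scale by the number of lags here is a simplification, not the exact transform.
weibull_cdf_decay_demo <- function(n_lags, shape, scale) {
  t <- seq_len(n_lags)
  1 - pweibull(t, shape = shape, scale = scale * n_lags) # survival curve = decay weights
}
weibull_pdf_weights_demo <- function(n_lags, shape, scale) {
  w <- dweibull(seq_len(n_lags), shape = shape, scale = scale * n_lags)
  w / max(w) # normalize so the peak weight equals 1
}
round(weibull_cdf_decay_demo(10, shape = 1.5, scale = 0.5), 3) # monotonic decay
round(weibull_pdf_weights_demo(10, shape = 2.5, scale = 0.4), 3) # peaks after t = 1: lagged effect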
## Hill function for saturation: The Hill function is a two-parameter function in Robyn with
# alpha and gamma. Alpha controls the shape of the curve between exponential and S-shape.
# The recommended bound is c(0.5, 3). The larger the alpha, the more S-shaped the curve; the
# smaller, the more C-shaped. Gamma controls the inflexion point. The recommended bound is c(0.3, 1). The
# larger the gamma, the later the inflection point in the response curve.
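# An illustrative sketch of the Hill saturation curve (not the package's exact
# implementation). Assumption: the inflexion point sits at the gamma quantile of the
# input range; Robyn performs a similar scaling of gamma internally.
hill_saturation_demo <- function(x, alpha, gamma) {
  gamma_trans <- min(x) + gamma * (max(x) - min(x)) # inflexion point on the input scale
  x^alpha / (x^alpha + gamma_trans^alpha)
}
x_demo <- seq(0, 100000, length.out = 100)
plot(x_demo, hill_saturation_demo(x_demo, alpha = 2, gamma = 0.5),
  type = "l", xlab = "spend", ylab = "saturated response"
) # more S-shaped for larger alpha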
## Regularization for ridge regression: Lambda is the penalty term for regularised regression.
# Lambda doesn't need to be defined manually by the user, because it is set to the range of
# c(0, 1) by default in hyperparameters and will be scaled to the proper magnitude with
# lambda_max and lambda_min_ratio.
## Time series validation: When ts_validation = TRUE in robyn_run(), train_size defines the
# percentage of data used for training, validation and out-of-sample testing. For example,
# when train_size = 0.7, val_size and test_size will be 0.15 each. This hyperparameter is
# customizable with default range of c(0.5, 0.8) and must be between c(0.1, 1).
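# Quick arithmetic check of the split described above: the share left over from
# train_size is divided evenly between validation and test.
train_size_demo <- 0.7
c(train = train_size_demo, val = (1 - train_size_demo) / 2, test = (1 - train_size_demo) / 2)
# train   val  test
#  0.70  0.15  0.15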
## 4. Set individual hyperparameter bounds. They either contain two values e.g. c(0, 0.5),
# or only one value, in which case you'd "fix" that hyperparameter.
# Run hyper_limits() to check maximum upper and lower bounds by range
hyper_limits()
# Example hyperparameters ranges for Geometric adstock
hyperparameters <- list(
facebook_S_alphas = c(0.5, 3),
facebook_S_gammas = c(0.3, 1),
facebook_S_thetas = c(0, 0.3),
print_S_alphas = c(0.5, 3),
print_S_gammas = c(0.3, 1),
print_S_thetas = c(0.1, 0.4),
tv_S_alphas = c(0.5, 3),
tv_S_gammas = c(0.3, 1),
tv_S_thetas = c(0.3, 0.8),
search_S_alphas = c(0.5, 3),
search_S_gammas = c(0.3, 1),
search_S_thetas = c(0, 0.3),
ooh_S_alphas = c(0.5, 3),
ooh_S_gammas = c(0.3, 1),
ooh_S_thetas = c(0.1, 0.4),
newsletter_alphas = c(0.5, 3),
newsletter_gammas = c(0.3, 1),
newsletter_thetas = c(0.1, 0.4),
train_size = c(0.5, 0.8)
)
# Example hyperparameters ranges for Weibull CDF adstock
# facebook_S_alphas = c(0.5, 3)
# facebook_S_gammas = c(0.3, 1)
# facebook_S_shapes = c(0, 2)
# facebook_S_scales = c(0, 0.1)
# Example hyperparameters ranges for Weibull PDF adstock
# facebook_S_alphas = c(0.5, 3)
# facebook_S_gammas = c(0.3, 1)
# facebook_S_shapes = c(0, 10)
# facebook_S_scales = c(0, 0.1)
#### 2a-3: Third, add hyperparameters into robyn_inputs()
InputCollect <- robyn_inputs(InputCollect = InputCollect, hyperparameters = hyperparameters)
print(InputCollect)
#### 2a-4: Fourth (optional), model calibration / add experimental input
## Guide for calibration
# 1. Calibration channels need to be paid_media_spends or organic_vars names.
# 2. We strongly recommend using Weibull PDF adstock for more degrees of freedom when
# calibrating Robyn.
# 3. We strongly recommend using experimental and causal results that are considered
# ground truth to calibrate MMM. Usual experiment types are identity-based (e.g. Facebook
# conversion lift) or geo-based (e.g. Facebook GeoLift). Due to the nature of treatment
# and control groups in an experiment, the result is considered an immediate effect. It's
# practically impossible to isolate historical carryover effects in an experiment. Therefore,
# calibrate only the immediate and the future carryover effect. When calibrating with
# causal experiments, use calibration_scope = "immediate".
# 4. It's controversial to use attribution/MTA contribution to calibrate MMM. Attribution
# is considered biased towards lower-funnel channels and strongly impacted by signal
# quality. When calibrating with MTA, use calibration_scope = "immediate".
# 5. Every MMM is different. Whether two MMMs are comparable is highly contextual.
# If you use another MMM's results to calibrate Robyn, use calibration_scope = "total".
# 6. Currently, Robyn only accepts point estimates as calibration input. For example, if
# a $10K spend is tested against a hold-out for channel A, then input the incremental
# return as a point estimate, as in the example below.
# 7. The point estimate always has to match the spend in the variable. For example, if
# channel A usually has a $100K weekly spend and the experimental holdout is 70%, input
# the point estimate for the $30K, not the $70K.
# 8. If an experiment contains more than one media variable, input "channel_A+channel_B"
# to indicate a combination of channels, case sensitive.
# calibration_input <- data.frame(
# # channel name must be in paid_media_vars
# channel = c("facebook_S", "tv_S", "facebook_S+search_S", "newsletter"),
# # liftStartDate must be within input data range
# liftStartDate = as.Date(c("2018-05-01", "2018-04-03", "2018-07-01", "2017-12-01")),
# # liftEndDate must be within input data range
# liftEndDate = as.Date(c("2018-06-10", "2018-06-03", "2018-07-20", "2017-12-31")),
# # The provided value must be tested on the same campaign level as in the model and in the same metric as dep_var_type
# liftAbs = c(400000, 300000, 700000, 200),
# # Spend within the experiment: should match, within a 10% error, your spend in dt_input over the date range for each channel
# spend = c(421000, 7100, 350000, 0),
# # Confidence: if frequentist experiment, you may use 1 - pvalue
# confidence = c(0.85, 0.8, 0.99, 0.95),
# # KPI measured: must match your dep_var
# metric = c("revenue", "revenue", "revenue", "revenue"),
# # Either "immediate" or "total". For experimental inputs like Facebook Lift, "immediate" is recommended.
# calibration_scope = c("immediate", "immediate", "immediate", "immediate")
# )
# InputCollect <- robyn_inputs(InputCollect = InputCollect, calibration_input = calibration_input)
################################################################
#### Step 2b: For known model specifications, set up in one single step
## Specify hyperparameters as in 2a-2 and optionally calibration as in 2a-4 and provide them directly in robyn_inputs()
# InputCollect <- robyn_inputs(
# dt_input = dt_simulated_weekly
# ,dt_holidays = dt_prophet_holidays
# ,date_var = "DATE"
# ,dep_var = "revenue"
# ,dep_var_type = "revenue"
# ,prophet_vars = c("trend", "season", "holiday")
# ,prophet_country = "DE"
# ,context_vars = c("competitor_sales_B", "events")
# ,paid_media_spends = c("tv_S", "ooh_S", "print_S", "facebook_S", "search_S")
# ,paid_media_vars = c("tv_S", "ooh_S", "print_S", "facebook_I", "search_clicks_P")
# ,organic_vars = c("newsletter")
# ,factor_vars = c("events")
# ,window_start = "2016-11-23"
# ,window_end = "2018-08-22"
# ,adstock = "geometric"
# ,hyperparameters = hyperparameters # as in 2a-2 above
# ,calibration_input = calibration_input # as in 2a-4 above
# )
#### Check spend exposure fit if available
if (length(InputCollect$exposure_vars) > 0) {
lapply(InputCollect$modNLS$plots, plot)
}
##### Manually save and import InputCollect as JSON file
# robyn_write(InputCollect, dir = "~/Desktop")
# InputCollect <- robyn_inputs(
# dt_input = dt_simulated_weekly,
# dt_holidays = dt_prophet_holidays,
# json_file = "~/Desktop/RobynModel-inputs.json")
################################################################
#### Step 3: Build initial model
## Run all trials and iterations. Use ?robyn_run to check parameter definition
OutputModels <- robyn_run(
InputCollect = InputCollect, # feed in all model specification
cores = NULL, # NULL defaults to (max available - 1)
iterations = 2000, # 2000 recommended for the dummy dataset with no calibration
trials = 5, # 5 recommended for the dummy dataset
ts_validation = TRUE, # 3-way-split time series for NRMSE validation.
add_penalty_factor = FALSE # Experimental feature. Use with caution.
)
print(OutputModels)
## Check MOO (multi-objective optimization) convergence plots
# Read more about convergence rules: ?robyn_converge
OutputModels$convergence$moo_distrb_plot
OutputModels$convergence$moo_cloud_plot
## Check time-series validation plot (when ts_validation == TRUE)
# Read more and replicate results: ?ts_validation
if (OutputModels$ts_validation) OutputModels$ts_validation_plot
## Calculate Pareto fronts, cluster and export results and plots. See ?robyn_outputs
OutputCollect <- robyn_outputs(
InputCollect, OutputModels,
pareto_fronts = "auto", # automatically pick how many pareto-fronts to fill min_candidates (100)
# min_candidates = 100, # top pareto models for clustering. Default to 100
# calibration_constraint = 0.1, # range c(0.01, 0.1) & default at 0.1
csv_out = "pareto", # "pareto", "all", or NULL (for none)
clusters = TRUE, # Set to TRUE to cluster similar models by ROAS. See ?robyn_clusters
export = create_files, # this will create files locally
plot_folder = robyn_directory, # path for plots exports and files creation
plot_pareto = create_files # Set to FALSE to deactivate plotting and saving model one-pagers
)
print(OutputCollect)
## 4 csv files are exported into the folder for further usage. Check schema here:
## https://github.com/facebookexperimental/Robyn/blob/main/demo/schema.R
# pareto_hyperparameters.csv, hyperparameters per Pareto output model
# pareto_aggregated.csv, aggregated decomposition per independent variable of all Pareto output models
# pareto_media_transform_matrix.csv, all media transformation vectors
# pareto_alldecomp_matrix.csv, all decomposition vectors of independent variables
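# A quick way to inspect one of the exported files (a sketch; assumes export = TRUE
# above and that the run folder path is stored in OutputCollect$plot_folder):
# pareto_hyp <- read.csv(file.path(OutputCollect$plot_folder, "pareto_hyperparameters.csv"))
# head(pareto_hyp)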
################################################################
#### Step 4: Select and save any model
## Compare all model one-pagers and select the one that best reflects your business reality
print(OutputCollect)
select_model <- "1_122_7" # Pick one of the models from OutputCollect to proceed
#### Version >=3.7.1: JSON export and import (faster and lighter than RDS files)
ExportedModel <- robyn_write(InputCollect, OutputCollect, select_model, export = create_files)
print(ExportedModel)
# To plot any model's one-pager:
myOnePager <- robyn_onepagers(InputCollect, OutputCollect, select_model, export = FALSE)
# To check each of the one-pager's plots
# myOnePager[[select_model]]$patches$plots[[1]]
# myOnePager[[select_model]]$patches$plots[[2]]
# myOnePager[[select_model]]$patches$plots[[3]] # ...
################################################################
#### Step 5: Get budget allocation based on the selected model above
## Budget allocation results require further validation. Please use these recommendations with caution.
## Don't interpret the budget allocation results if the selected model above doesn't meet business expectations.
# Check media summary for selected model
print(ExportedModel)
# Run ?robyn_allocator to check parameter definition
# NOTE: The order of constraints should follow:
InputCollect$paid_media_spends
# Scenario "max_response": "What's the max. return given certain spend?"
# Example 1: max_response default setting: maximize response for latest month
AllocatorCollect1 <- robyn_allocator(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
# date_range = "all", # Default to "all"
# total_budget = NULL, # When NULL, default is total spend in date_range
channel_constr_low = 0.7,
channel_constr_up = c(1.2, 1.5, 1.5, 1.5, 1.5),
# channel_constr_multiplier = 3,
scenario = "max_response",
export = create_files
)
# Print & plot allocator's output
print(AllocatorCollect1)
plot(AllocatorCollect1)
# Example 2: maximize response for latest 10 periods with given spend
AllocatorCollect2 <- robyn_allocator(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
date_range = "last_10", # Last 10 periods, same as c("2018-10-22", "2018-12-31")
total_budget = 5000000, # Total budget for date_range period simulation
channel_constr_low = c(0.8, 0.7, 0.7, 0.7, 0.7),
channel_constr_up = c(1.2, 1.5, 1.5, 1.5, 1.5),
channel_constr_multiplier = 5, # Customise bound extension for wider insights
scenario = "max_response",
export = create_files
)
print(AllocatorCollect2)
plot(AllocatorCollect2)
# Scenario "target_efficiency": "How much to spend to hit ROAS or CPA of x?"
# Example 3: Use default ROAS target for revenue or CPA target for conversion
# Check InputCollect$dep_var_type for revenue or conversion type
# Two default ROAS targets: 0.8x of initial ROAS as well as ROAS = 1
# Two default CPA targets: 1.2x and 2.4x of the initial CPA
AllocatorCollect3 <- robyn_allocator(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
# date_range = NULL, # Default: "all" available dates
scenario = "target_efficiency",
# target_value = 2, # Customize target ROAS or CPA value
export = create_files
)
print(AllocatorCollect3)
plot(AllocatorCollect3)
# Example 4: Customize target_value for ROAS or CPA (using json_file)
json_file <- "~/Desktop/Robyn_202302221206_init/RobynModel-1_117_11.json"
AllocatorCollect4 <- robyn_allocator(
json_file = json_file, # Using json file from robyn_write() for allocation
dt_input = dt_simulated_weekly,
dt_holidays = dt_prophet_holidays,
date_range = NULL, # Default last month as initial period
scenario = "target_efficiency",
target_value = 2, # Customize target ROAS or CPA value
plot_folder = "~/Desktop/my_dir",
plot_folder_sub = "my_subdir",
export = create_files
)
## A csv is exported into the folder for further usage. Check schema here:
## https://github.com/facebookexperimental/Robyn/blob/main/demo/schema.R
## QA optimal response
# Pick any media variable: InputCollect$all_media
select_media <- "search_S"
# For paid_media_spends set metric_value as your optimal spend
metric_value <- AllocatorCollect1$dt_optimOut$optmSpendUnit[
AllocatorCollect1$dt_optimOut$channels == select_media
]; metric_value
# # For paid_media_vars and organic_vars, manually pick a value
# metric_value <- 10000
## Saturation curve for adstocked metric results (example)
robyn_response(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
metric_name = select_media,
metric_value = metric_value,
date_range = "last_5"
)
################################################################
#### Step 6: Model refresh based on selected model and saved results
## Must run robyn_write() (manually or automatically) to export any model first, before refreshing.
## The robyn_refresh() function is suitable for updating within "reasonable periods".
## In two situations it is better to rebuild the model instead:
## 1. Most data is new. If the initial model has 100 weeks and 80 weeks of new data are
## added in the refresh, it might be better to rebuild the model. Rule of thumb: 50% of the data or less should be new.
## 2. New variables are added.
# Provide JSON file with your InputCollect and ExportedModel specifications
# It can be any model, initial or a refresh model
json_file <- "~/Desktop/Robyn_202211211853_init/RobynModel-1_100_6.json"
RobynRefresh <- robyn_refresh(
json_file = json_file,
dt_input = dt_simulated_weekly,
dt_holidays = dt_prophet_holidays,
refresh_steps = 13,
refresh_iters = 1000, # 1k is a rough estimate
refresh_trials = 1
)
# Now refreshing a refreshed model, following the same approach
json_file_rf1 <- "~/Desktop/Robyn_202208231837_init/Robyn_202208231841_rf1/RobynModel-1_12_5.json"
RobynRefresh <- robyn_refresh(
json_file = json_file_rf1,
dt_input = dt_simulated_weekly,
dt_holidays = dt_prophet_holidays,
refresh_steps = 7,
refresh_iters = 1000, # 1k is a rough estimate
refresh_trials = 1
)
# Continue with refreshed new InputCollect, OutputCollect, select_model values
InputCollectX <- RobynRefresh$listRefresh1$InputCollect
OutputCollectX <- RobynRefresh$listRefresh1$OutputCollect
select_modelX <- RobynRefresh$listRefresh1$OutputCollect$selectID
## Besides plots: there are 4 CSV outputs saved in the folder for further usage
# report_hyperparameters.csv, hyperparameters of all selected models for reporting
# report_aggregated.csv, aggregated decomposition per independent variable
# report_media_transform_matrix.csv, all media transformation vectors
# report_alldecomp_matrix.csv, all decomposition vectors of independent variables
################################################################
#### Step 7: Get marginal returns
## Example of how to get the marginal ROI of the next 100$ at a given spend level (here: facebook_S at 20k)
# Run ?robyn_response to check parameter definition
## The robyn_response() function can now output the response for both spends and exposures (imps,
## GRP, newsletter sendings, etc.) as well as plot individual saturation curves. The new
## argument names "metric_name" and "metric_value" (replacing "paid_media_var" and "spend")
## accommodate this change. Also, the returned output is now a list that also contains
## the plot.
## Recreate original saturation curve
Response <- robyn_response(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
metric_name = "facebook_S"
)
Response$plot
## Or you can call a JSON file directly (a bit slower)
# Response <- robyn_response(
# json_file = "your_json_path.json",
# dt_input = dt_simulated_weekly,
# dt_holidays = dt_prophet_holidays,
# metric_name = "facebook_S"
# )
## Get the "next 100 dollar" marginal response on Spend1
Spend1 <- 20000
Response1 <- robyn_response(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
metric_name = "facebook_S",
metric_value = Spend1, # total budget for date_range
date_range = "last_1" # last two periods
)
Response1$plot
Spend2 <- Spend1 + 100
Response2 <- robyn_response(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
metric_name = "facebook_S",
metric_value = Spend2,
date_range = "last_1"
)
# Marginal ROAS for the next 100$ at the Spend1 level
(Response2$response_total - Response1$response_total) / (Spend2 - Spend1)
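# A small convenience wrapper (a sketch, not part of the package) around the two
# robyn_response() calls above, to read off the marginal ROAS at any spend level:
marginal_roas_demo <- function(spend, channel = "facebook_S", delta = 100, dr = "last_1") {
  r1 <- robyn_response(
    InputCollect = InputCollect, OutputCollect = OutputCollect,
    select_model = select_model, metric_name = channel,
    metric_value = spend, date_range = dr
  )
  r2 <- robyn_response(
    InputCollect = InputCollect, OutputCollect = OutputCollect,
    select_model = select_model, metric_name = channel,
    metric_value = spend + delta, date_range = dr
  )
  (r2$response_total - r1$response_total) / delta
}
# marginal_roas_demo(20000) # same result as the manual two-step calculation above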
## Get the response for a given budget and date_range
Spend3 <- 100000
Response3 <- robyn_response(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
metric_name = "facebook_S",
metric_value = Spend3, # total budget for date_range
date_range = "last_5" # last 5 periods
)
Response3$plot
## Example of getting paid media exposure response curves
# imps <- 10000000
# response_imps <- robyn_response(
# InputCollect = InputCollect,
# OutputCollect = OutputCollect,
# select_model = select_model,
# metric_name = "facebook_I",
# metric_value = imps
# )
# response_imps$response_total / imps * 1000
# response_imps$plot
## Example of getting organic media response curves
sendings <- 30000
response_sending <- robyn_response(
InputCollect = InputCollect,
OutputCollect = OutputCollect,
select_model = select_model,
metric_name = "newsletter",
metric_value = sendings
)
# response per 1000 sendings
response_sending$response_total / sendings * 1000
response_sending$plot
################################################################
#### Optional: recreate old models and replicate results
# From an exported JSON file (which is created automatically when exporting a model)
# we can re-create a previously trained model and outputs. Note: we need to provide
# the main dataset and the holidays dataset, which are NOT stored in the JSON file.
# These JSON files will be automatically created in most cases.
############ WRITE ############
# Manually create JSON file with inputs data only
robyn_write(InputCollect, dir = "~/Desktop")
# Manually create JSON file with inputs and specific model results
robyn_write(InputCollect, OutputCollect, select_model)
############ READ ############
# Recreate `InputCollect` and `OutputCollect` objects
# Pick any exported model (initial or refreshed)
json_file <- "~/Desktop/Robyn_202208231837_init/RobynModel-1_100_6.json"
# Optional: Manually read and check data stored in file
json_data <- robyn_read(json_file)
print(json_data)
# Re-create InputCollect
InputCollectX <- robyn_inputs(
dt_input = dt_simulated_weekly,
dt_holidays = dt_prophet_holidays,
json_file = json_file)
# Re-create OutputCollect
OutputCollectX <- robyn_run(
InputCollect = InputCollectX,
json_file = json_file,
export = create_files)
# Or re-create both by simply using robyn_recreate()
RobynRecreated <- robyn_recreate(
json_file = "~/Desktop/Robyn_202303131448_init/RobynModel-1_103_7.json",
dt_input = dt_simulated_weekly,
dt_holidays = dt_prophet_holidays,
quiet = FALSE)
InputCollectX <- RobynRecreated$InputCollect
OutputCollectX <- RobynRecreated$OutputCollect
# Re-export or rebuild a model and check summary
myModel <- robyn_write(InputCollectX, OutputCollectX, export = FALSE, dir = "~/Desktop")
print(myModel)
# Re-create one-pager
myModelPlot <- robyn_onepagers(InputCollectX, OutputCollectX, export = FALSE)
# myModelPlot[[1]]$patches$plots[[7]]
# Refresh any imported model
RobynRefresh <- robyn_refresh(
json_file = json_file,
dt_input = InputCollectX$dt_input,
dt_holidays = InputCollectX$dt_holidays,
refresh_steps = 6,
refresh_mode = "manual",
refresh_iters = 1000,
refresh_trials = 1
)
# Recreate response curves
robyn_response(
InputCollect = InputCollectX,
OutputCollect = OutputCollectX,
metric_name = "newsletter",
metric_value = 50000
)