Skip to content

Commit

Permalink
Merge branch 'main' into cetagostini/multidimensionalmediamix
Browse files Browse the repository at this point in the history
  • Loading branch information
cetagostini authored Feb 3, 2025
2 parents 93e2d56 + 0750eba commit 75ef423
Show file tree
Hide file tree
Showing 14 changed files with 182 additions and 82 deletions.
3 changes: 3 additions & 0 deletions .github/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ changelog:
- title: Major Changes 🛠
labels:
- major
- title: Deprecations 🚨
labels:
- deprecation
- title: New Features 🎉
labels:
- enhancement
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1236,7 +1236,7 @@
"initial_budget_scenario[\"t\"] = 0\n",
"\n",
"response_initial_budget = mmm.sample_posterior_predictive(\n",
" X_pred=initial_budget_scenario, extend_idata=False\n",
" initial_budget_scenario, extend_idata=False\n",
")\n",
"\n",
"response_initial_budget"
Expand Down
4 changes: 2 additions & 2 deletions docs/source/notebooks/mmm/mmm_case_study.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -6285,7 +6285,7 @@
"outputs": [],
"source": [
"y_pred_test = mmm.sample_posterior_predictive(\n",
" X_pred=X_test,\n",
" X_test,\n",
" include_last_observations=True,\n",
" original_scale=True,\n",
" var_names=[\"y\", \"channel_contributions\"],\n",
Expand Down Expand Up @@ -6942,7 +6942,7 @@
" X_actual_uniform[control] = 0.0\n",
"\n",
"pred_test_uniform = mmm.sample_posterior_predictive(\n",
" X_pred=X_actual_uniform,\n",
" X_actual_uniform,\n",
" include_last_observations=True,\n",
" original_scale=True,\n",
" var_names=[\"y\", \"channel_contributions\"],\n",
Expand Down
6 changes: 3 additions & 3 deletions docs/source/notebooks/mmm/mmm_counterfactuals.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -701,7 +701,7 @@
],
"source": [
"y_forecast = mmm.sample_posterior_predictive(\n",
" X_pred=X_forecast, extend_idata=False, include_last_observations=True\n",
" X_forecast, extend_idata=False, include_last_observations=True\n",
")"
]
},
Expand Down Expand Up @@ -958,7 +958,7 @@
],
"source": [
"y_intervention = mmm.sample_posterior_predictive(\n",
" X_pred=X_intervention, extend_idata=False, include_last_observations=True\n",
" X_intervention, extend_idata=False, include_last_observations=True\n",
")"
]
},
Expand Down Expand Up @@ -1289,7 +1289,7 @@
],
"source": [
"y_counterfactual = mmm.sample_posterior_predictive(\n",
" X_pred=X_counterfactual, extend_idata=False\n",
" X_counterfactual, extend_idata=False\n",
");"
]
},
Expand Down
8 changes: 3 additions & 5 deletions docs/source/notebooks/mmm/mmm_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -9661,7 +9661,7 @@
"- `sample_posterior_predictive` : Get the full posterior predictive distribution\n",
"- `predict`: Get the mean of the posterior predictive distribution\n",
"\n",
"These methods take new data, `X_pred`, and some additional `kwargs` for new predictions. Namely, \n",
"These methods take new data, `X`, and some additional `kwargs` for new predictions. Namely, \n",
"\n",
"- `include_last_observations` : boolean flag in order to carry adstock effects from last observations in the training dataset"
]
Expand Down Expand Up @@ -10313,9 +10313,7 @@
}
],
"source": [
"y_out_of_sample = mmm.sample_posterior_predictive(\n",
" X_pred=X_out_of_sample, extend_idata=False\n",
")\n",
"y_out_of_sample = mmm.sample_posterior_predictive(X_out_of_sample, extend_idata=False)\n",
"\n",
"y_out_of_sample"
]
Expand Down Expand Up @@ -10435,7 +10433,7 @@
],
"source": [
"y_out_of_sample_with_adstock = mmm.sample_posterior_predictive(\n",
" X_pred=X_out_of_sample, extend_idata=False, include_last_observations=True\n",
" X_out_of_sample, extend_idata=False, include_last_observations=True\n",
")"
]
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@
" mmm = fit_mmm(mmm, X_train, y_train, random_seed)\n",
"\n",
" y_pred_test = mmm.sample_posterior_predictive(\n",
" X_pred=X_test,\n",
" X_test,\n",
" include_last_observations=True,\n",
" original_scale=True,\n",
" extend_idata=False,\n",
Expand Down
2 changes: 1 addition & 1 deletion docs/source/notebooks/mmm/mmm_tvp_example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -613,7 +613,7 @@
" # Sample posterior predictive in whole data range (train and test)\n",
" if \"posterior_predictive\" not in mmm.idata:\n",
" mmm.sample_posterior_predictive(\n",
" X_pred=DATA, extend_idata=True, var_names=[\"y\", \"intercept\"]\n",
" DATA, extend_idata=True, var_names=[\"y\", \"intercept\"]\n",
" )\n",
" mmm.y = target_series.values\n",
"\n",
Expand Down
4 changes: 2 additions & 2 deletions pymc_marketing/mlflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -673,7 +673,7 @@ class MMMWrapper(mlflow.pyfunc.PythonModel):
Combine chain and draw dims into sample. Won't work if a dim named sample already exists. Defaults to True.
include_last_observations : bool, default=False
Boolean determining whether to include the last observations of the training data in order to carry over
costs with the adstock transformation. Assumes that X_pred are the next predictions following the
costs with the adstock transformation. Assumes that X are the next predictions following the
training data. Defaults to False.
original_scale : bool, default=True
Boolean determining whether to return the predictions in the original scale of the target variable.
Expand Down Expand Up @@ -800,7 +800,7 @@ def log_mmm(
already exists. Used for posterior/prior predictive sampling. Defaults to True.
include_last_observations : bool, optional
Whether to include the last observations of training data for adstock transformation.
Assumes X_pred are next predictions following training data. Used for all prediction
Assumes X are next predictions following training data. Used for all prediction
methods. Defaults to False.
original_scale : bool, optional
Whether to return predictions in original scale of target variable. Used for all
Expand Down
7 changes: 3 additions & 4 deletions pymc_marketing/mmm/hsgp.py
Original file line number Diff line number Diff line change
Expand Up @@ -655,20 +655,19 @@ def parameterize_from_data(
if isinstance(X, np.ndarray):
numeric_X = np.asarray(X)
elif isinstance(X, TensorVariable):
numeric_X = X.get_value(borrow=False) # current numeric data
numeric_X = X.get_value(borrow=False)
else:
raise ValueError(
"X must be a NumPy array (or list) or a pm.Data/pm.MutableData. "
"X must be a NumPy array (or list) or a TensorVariable. "
"If it's a plain symbolic tensor, you must manually specify m, L."
)

numeric_X = np.asarray(numeric_X)
if X_mid is None:
X_mid = float(numeric_X.mean())

# 3. Use the standard approximation
m, L = create_m_and_L_recommendations(
numeric_X, # numeric version
numeric_X,
X_mid,
ls_lower=ls_lower,
ls_upper=ls_upper,
Expand Down
18 changes: 10 additions & 8 deletions pymc_marketing/mmm/mmm.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
create_new_spend_data,
)
from pymc_marketing.mmm.validating import ValidateControlColumns
from pymc_marketing.model_builder import _handle_deprecate_pred_argument
from pymc_marketing.model_config import parse_model_config
from pymc_marketing.prior import Prior

Expand Down Expand Up @@ -1902,7 +1903,7 @@ def legend_title_func(channel):

def sample_posterior_predictive(
self,
X_pred,
X=None,
extend_idata: bool = True,
combined: bool = True,
include_last_observations: bool = False,
Expand All @@ -1913,15 +1914,15 @@ def sample_posterior_predictive(
Parameters
----------
X_pred : array, shape (n_pred, n_features)
X : array, shape (n_pred, n_features)
The input data used for prediction.
extend_idata : bool, optional
Boolean determining whether the predictions should be added to inference data object. Defaults to True.
combined: bool, optional
Combine chain and draw dims into sample. Won't work if a dim named sample already exists. Defaults to True.
include_last_observations: bool, optional
Boolean determining whether to include the last observations of the training data in order to carry over
costs with the adstock transformation. Assumes that X_pred are the next predictions following the
costs with the adstock transformation. Assumes that X are the next predictions following the
            training data. Defaults to False.
original_scale: bool, optional
Boolean determining whether to return the predictions in the original scale of the target variable.
Expand All @@ -1932,15 +1933,16 @@ def sample_posterior_predictive(
Returns
-------
posterior_predictive_samples : DataArray, shape (n_pred, samples)
Posterior predictive samples for each input X_pred
Posterior predictive samples for each input X
"""
X = _handle_deprecate_pred_argument(X, "X", sample_posterior_predictive_kwargs)
if include_last_observations:
X_pred = pd.concat(
[self.X.iloc[-self.adstock.l_max :, :], X_pred], axis=0
X = pd.concat(
[self.X.iloc[-self.adstock.l_max :, :], X], axis=0
).sort_values(by=self.date_column)

self._data_setter(X_pred)
self._data_setter(X)

with self.model: # sample with new input data
post_pred = pm.sample_posterior_predictive(
Expand Down Expand Up @@ -2251,7 +2253,7 @@ def sample_response_distribution(
constant_data = allocation_strategy.to_dataset(name="allocation")

return self.sample_posterior_predictive(
X_pred=synth_dataset,
X=synth_dataset,
extend_idata=False,
include_last_observations=True,
original_scale=False,
Expand Down
Loading

0 comments on commit 75ef423

Please sign in to comment.