Enable model serialization
kbattocchi committed Jun 15, 2020
1 parent 1a78dcb commit 119ca77
Showing 10 changed files with 669 additions and 583 deletions.
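The common thread in the changes below: helpers previously defined as lambdas or as classes nested inside function bodies are hoisted to private module-level definitions, because `pickle` can only serialize objects whose types are importable by name, so fitted estimators holding such helpers could not be saved. A minimal sketch of the distinction (the names here are illustrative, not from the codebase):

```python
import pickle

def make_incrementer():
    # Locally defined functions (like lambdas) are not importable by name,
    # so pickle refuses to serialize them:
    return lambda x: x + 1

try:
    pickle.dumps(make_incrementer())
except (pickle.PicklingError, AttributeError) as e:
    print("not picklable:", e)

class _Incrementer:
    """Module-level callable: pickle stores a reference to the class by name."""
    def __call__(self, x):
        return x + 1

restored = pickle.loads(pickle.dumps(_Incrementer()))
print(restored(41))  # 42
```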
14 changes: 5 additions & 9 deletions econml/_ortho_learner.py
@@ -29,7 +29,7 @@ class in this module implements the general logic in a very versatile way
 from warnings import warn
 from .utilities import (shape, reshape, ndim, hstack, cross_product, transpose, inverse_onehot,
                         broadcast_unit_treatments, reshape_treatmentwise_effects,
-                        StatsModelsLinearRegression, LassoCVWrapper)
+                        StatsModelsLinearRegression, _EncoderWrapper)
 from sklearn.model_selection import KFold, StratifiedKFold, check_cv
 from sklearn.linear_model import LinearRegression, LassoCV
 from sklearn.preprocessing import (PolynomialFeatures, LabelEncoder, OneHotEncoder,
@@ -551,12 +551,10 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
             else:
                 to_split = Z  # just stratify on Z
 
-            z_ohe = OneHotEncoder(categories='auto', sparse=False)
-            Z = z_ohe.fit_transform(reshape(Z, (-1, 1)))[:, 1:]
+            z_ohe = OneHotEncoder(categories='auto', sparse=False, drop='first')
+            Z = z_ohe.fit_transform(reshape(Z, (-1, 1)))
             self.z_transformer = FunctionTransformer(
-                func=(lambda Z:
-                      z_ohe.transform(
-                          reshape(z_enc.transform(Z.ravel()), (-1, 1)))[:, 1:]),
+                func=_EncoderWrapper(z_ohe, z_enc).encode,
                 validate=False)
         else:
             # stratify on T if discrete, and fine to pass T as second arg to KFold.split even when not
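Two independent fixes show up in this hunk: `drop='first'` replaces the manual `[:, 1:]` column slice (so the slicing lambda can go away), and the transform function becomes a bound method of the picklable `_EncoderWrapper` instead of a closure. The two encodings are identical; a quick check (note that scikit-learn >= 1.2 spells `sparse` as `sparse_output`):

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

Z = np.array([['a'], ['b'], ['c'], ['b']])

# Old approach: encode all categories, then slice off the first column.
manual = OneHotEncoder(categories='auto', sparse=False).fit_transform(Z)[:, 1:]
# New approach: let the encoder drop the first category itself.
builtin = OneHotEncoder(categories='auto', sparse=False, drop='first').fit_transform(Z)

assert np.array_equal(manual, builtin)  # both drop the 'a' column
```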
@@ -582,9 +580,7 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
         if self._discrete_treatment:
             self._d_t = shape(T)[1:]
             self.transformer = FunctionTransformer(
-                func=(lambda T:
-                      self._one_hot_encoder.transform(
-                          reshape(T, (-1, 1)))),
+                func=_EncoderWrapper(self._one_hot_encoder).encode,
                 validate=False)
 
         nuisances, fitted_models, fitted_inds, scores = _crossfit(self._model_nuisance, folds,
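`_EncoderWrapper` itself lives in `econml/utilities.py`, whose diff is not expanded in this view. Judging only from the two call sites above, it is presumably a small module-level class that holds the fitted encoders and exposes a picklable `encode` method; a hypothetical sketch, not the actual implementation:

```python
# Hypothetical sketch of _EncoderWrapper, inferred from the call sites above;
# the real definition is in econml/utilities.py (not shown in this diff).
class _EncoderWrapper:
    def __init__(self, encoder, factorizer=None):
        self._encoder = encoder        # a fitted OneHotEncoder
        self._factorizer = factorizer  # an optional fitted LabelEncoder

    def encode(self, arr):
        # Optionally map raw labels to integer codes, then one-hot encode.
        # A bound method of a module-level class pickles fine, unlike a lambda.
        if self._factorizer is not None:
            arr = self._factorizer.transform(arr.ravel())
        return self._encoder.transform(arr.reshape(-1, 1))
```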
153 changes: 78 additions & 75 deletions econml/_rlearner.py
@@ -34,6 +34,82 @@
 from ._ortho_learner import _OrthoLearner
 
 
+class _ModelNuisance:
+    """
+    Nuisance model fits the model_y and model_t at fit time and at predict time
+    calculates the residual Y and residual T based on the fitted models and returns
+    the residuals as two nuisance parameters.
+    """
+
+    def __init__(self, model_y, model_t):
+        self._model_y = clone(model_y, safe=False)
+        self._model_t = clone(model_t, safe=False)
+
+    def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
+        assert Z is None, "Cannot accept instrument!"
+        self._model_t.fit(X, W, T, sample_weight=sample_weight)
+        self._model_y.fit(X, W, Y, sample_weight=sample_weight)
+        return self
+
+    def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
+        if hasattr(self._model_y, 'score'):
+            Y_score = self._model_y.score(X, W, Y, sample_weight=sample_weight)
+        else:
+            Y_score = None
+        if hasattr(self._model_t, 'score'):
+            T_score = self._model_t.score(X, W, T, sample_weight=sample_weight)
+        else:
+            T_score = None
+        return Y_score, T_score
+
+    def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
+        Y_pred = self._model_y.predict(X, W)
+        T_pred = self._model_t.predict(X, W)
+        if (X is None) and (W is None):  # In this case predict above returns a single row
+            Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
+            T_pred = np.tile(T_pred.reshape(1, -1), (T.shape[0], 1))
+        Y_res = Y - Y_pred.reshape(Y.shape)
+        T_res = T - T_pred.reshape(T.shape)
+        return Y_res, T_res
+
+
+class _ModelFinal:
+    """
+    Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
+    that depends on X, i.e.
+    .. math ::
+        Y - E[Y | X, W] = \\theta(X) \\cdot (T - E[T | X, W]) + \\epsilon
+    and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
+    residual on residual regression.
+    """
+
+    def __init__(self, model_final):
+        self._model_final = clone(model_final, safe=False)
+
+    def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
+        Y_res, T_res = nuisances
+        self._model_final.fit(X, T_res, Y_res, sample_weight=sample_weight, sample_var=sample_var)
+        return self
+
+    def predict(self, X=None):
+        return self._model_final.predict(X)
+
+    def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
+        Y_res, T_res = nuisances
+        if Y_res.ndim == 1:
+            Y_res = Y_res.reshape((-1, 1))
+        if T_res.ndim == 1:
+            T_res = T_res.reshape((-1, 1))
+        effects = self._model_final.predict(X).reshape((-1, Y_res.shape[1], T_res.shape[1]))
+        Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res).reshape(Y_res.shape)
+        if sample_weight is not None:
+            return np.mean(np.average((Y_res - Y_res_pred)**2, weights=sample_weight, axis=0))
+        else:
+            return np.mean((Y_res - Y_res_pred) ** 2)
+
+
 class _RLearner(_OrthoLearner):
     """
     Base class for CATE learners that residualize treatment and outcome and run residual on residual regression.
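The math in `_ModelFinal`'s docstring is the R-learner moment: once E[Y | X, W] and E[T | X, W] are partialled out, θ is identified by regressing the Y residual on the T residual. A toy check of that identity with oracle nuisances and a constant effect θ = 2 (all names below are illustrative):

```python
import numpy as np
from sklearn.linear_model import LinearRegression

# Y = theta * T + sin(W) + noise and T = 0.5 * W + noise, with theta = 2.
rng = np.random.default_rng(0)
n = 100_000
W = rng.normal(size=n)
T = 0.5 * W + rng.normal(size=n)
Y = 2.0 * T + np.sin(W) + 0.1 * rng.normal(size=n)

T_res = T - 0.5 * W          # T - E[T | W]
Y_res = Y - (W + np.sin(W))  # Y - E[Y | W], since E[Y | W] = 2 * 0.5 * W + sin(W)
theta_hat = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res).coef_[0]
print(theta_hat)  # ~= 2.0
```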
@@ -202,82 +278,9 @@ def predict(self, X):
 
     def __init__(self, model_y, model_t, model_final,
                  discrete_treatment, categories, n_splits, random_state):
-        class ModelNuisance:
-            """
-            Nuisance model fits the model_y and model_t at fit time and at predict time
-            calculates the residual Y and residual T based on the fitted models and returns
-            the residuals as two nuisance parameters.
-            """
-
-            def __init__(self, model_y, model_t):
-                self._model_y = clone(model_y, safe=False)
-                self._model_t = clone(model_t, safe=False)
-
-            def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
-                assert Z is None, "Cannot accept instrument!"
-                self._model_t.fit(X, W, T, sample_weight=sample_weight)
-                self._model_y.fit(X, W, Y, sample_weight=sample_weight)
-                return self
-
-            def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
-                if hasattr(self._model_y, 'score'):
-                    Y_score = self._model_y.score(X, W, Y, sample_weight=sample_weight)
-                else:
-                    Y_score = None
-                if hasattr(self._model_t, 'score'):
-                    T_score = self._model_t.score(X, W, T, sample_weight=sample_weight)
-                else:
-                    T_score = None
-                return Y_score, T_score
-
-            def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
-                Y_pred = self._model_y.predict(X, W)
-                T_pred = self._model_t.predict(X, W)
-                if (X is None) and (W is None):  # In this case predict above returns a single row
-                    Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
-                    T_pred = np.tile(T_pred.reshape(1, -1), (T.shape[0], 1))
-                Y_res = Y - Y_pred.reshape(Y.shape)
-                T_res = T - T_pred.reshape(T.shape)
-                return Y_res, T_res
-
-        class ModelFinal:
-            """
-            Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
-            that depends on X, i.e.
-            .. math ::
-                Y - E[Y | X, W] = \\theta(X) \\cdot (T - E[T | X, W]) + \\epsilon
-            and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
-            residual on residual regression.
-            """
-
-            def __init__(self, model_final):
-                self._model_final = clone(model_final, safe=False)
-
-            def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
-                Y_res, T_res = nuisances
-                self._model_final.fit(X, T_res, Y_res, sample_weight=sample_weight, sample_var=sample_var)
-                return self
-
-            def predict(self, X=None):
-                return self._model_final.predict(X)
-
-            def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
-                Y_res, T_res = nuisances
-                if Y_res.ndim == 1:
-                    Y_res = Y_res.reshape((-1, 1))
-                if T_res.ndim == 1:
-                    T_res = T_res.reshape((-1, 1))
-                effects = self._model_final.predict(X).reshape((-1, Y_res.shape[1], T_res.shape[1]))
-                Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res).reshape(Y_res.shape)
-                if sample_weight is not None:
-                    return np.mean(np.average((Y_res - Y_res_pred)**2, weights=sample_weight, axis=0))
-                else:
-                    return np.mean((Y_res - Y_res_pred)**2)
-
-        super().__init__(ModelNuisance(model_y, model_t),
-                         ModelFinal(model_final),
+        super().__init__(_ModelNuisance(model_y, model_t),
+                         _ModelFinal(model_final),
                          discrete_treatment=discrete_treatment,
                          discrete_instrument=False,  # no instrument, so doesn't matter
                          categories=categories,
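One detail of `_ModelFinal.score` worth unpacking: `effects` has shape `(n, d_y, d_t)` and `T_res` has shape `(n, d_t)`, so `np.einsum('ijk,ik->ij', ...)` is a per-sample matrix-vector product θ(Xᵢ) · T_resᵢ, i.e. the predicted outcome residual. An equivalent explicit loop:

```python
import numpy as np

n, d_y, d_t = 4, 2, 3
rng = np.random.default_rng(0)
effects = rng.normal(size=(n, d_y, d_t))  # one (d_y, d_t) matrix theta(X_i) per sample
T_res = rng.normal(size=(n, d_t))

Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res)
looped = np.stack([effects[i] @ T_res[i] for i in range(n)])
assert np.allclose(Y_res_pred, looped)
```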
40 changes: 21 additions & 19 deletions econml/dml.py
@@ -37,7 +37,7 @@
 import copy
 from warnings import warn
 from .utilities import (shape, reshape, ndim, hstack, cross_product, transpose, inverse_onehot,
-                        broadcast_unit_treatments, reshape_treatmentwise_effects,
+                        broadcast_unit_treatments, reshape_treatmentwise_effects, add_intercept,
                         StatsModelsLinearRegression, LassoCVWrapper, check_high_dimensional)
 from econml.sklearn_extensions.linear_model import MultiOutputDebiasedLasso, WeightedLassoCVWrapper
 from econml.sklearn_extensions.ensemble import SubsampledHonestForest
@@ -126,14 +126,13 @@ def __init__(self, model_final, fit_cate_intercept, featurizer, use_weight_trick
         else:
             self._fit_cate_intercept = fit_cate_intercept
         if self._fit_cate_intercept:
-            add_intercept = FunctionTransformer(lambda F:
-                                                hstack([np.ones((F.shape[0], 1)), F]),
-                                                validate=True)
+            add_intercept_trans = FunctionTransformer(add_intercept,
+                                                      validate=True)
             if featurizer:
                 self._featurizer = Pipeline([('featurize', self._original_featurizer),
-                                             ('add_intercept', add_intercept)])
+                                             ('add_intercept', add_intercept_trans)])
             else:
-                self._featurizer = add_intercept
+                self._featurizer = add_intercept_trans
         else:
             self._featurizer = self._original_featurizer
 
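`add_intercept` is now imported from `econml/utilities.py` (another file whose diff is not expanded here) in place of the inline lambda, again so that the resulting `FunctionTransformer` survives pickling. From its use it is presumably just ones-column prepending, roughly:

```python
import numpy as np

# Hypothetical sketch of add_intercept from econml/utilities.py: prepend a
# column of ones so the final model can fit a constant CATE term.
def add_intercept(F):
    return np.hstack([np.ones((F.shape[0], 1)), F])
```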
@@ -704,6 +703,21 @@ def fit(self, Y, T, X=None, W=None, sample_weight=None, sample_var=None, inference=None):
         return super().fit(Y, T, X=X, W=W, sample_weight=sample_weight, sample_var=None, inference=inference)
 
 
+class _RandomFeatures(TransformerMixin):
+    def __init__(self, dim, bw, random_state):
+        self._dim = dim
+        self._bw = bw
+        self._random_state = check_random_state(random_state)
+
+    def fit(self, X):
+        self.omegas = self._random_state.normal(0, 1 / self._bw, size=(shape(X)[1], self._dim))
+        self.biases = self._random_state.uniform(0, 2 * np.pi, size=(1, self._dim))
+        return self
+
+    def transform(self, X):
+        return np.sqrt(2 / self._dim) * np.cos(np.matmul(X, self.omegas) + self.biases)
+
+
 class KernelDMLCateEstimator(DMLCateEstimator):
     """
     A specialized version of the linear Double ML Estimator that uses random fourier features.
@@ -764,21 +778,9 @@ class KernelDMLCateEstimator(DMLCateEstimator):
 
     def __init__(self, model_y=WeightedLassoCVWrapper(), model_t='auto', fit_cate_intercept=True,
                  dim=20, bw=1.0, discrete_treatment=False, categories='auto', n_splits=2, random_state=None):
-        class RandomFeatures(TransformerMixin):
-            def __init__(self, random_state):
-                self._random_state = check_random_state(random_state)
-
-            def fit(self, X):
-                self.omegas = self._random_state.normal(0, 1 / bw, size=(shape(X)[1], dim))
-                self.biases = self._random_state.uniform(0, 2 * np.pi, size=(1, dim))
-                return self
-
-            def transform(self, X):
-                return np.sqrt(2 / dim) * np.cos(np.matmul(X, self.omegas) + self.biases)
-
         super().__init__(model_y=model_y, model_t=model_t,
                          model_final=ElasticNetCV(fit_intercept=False),
-                         featurizer=RandomFeatures(random_state),
+                         featurizer=_RandomFeatures(dim, bw, random_state),
                          fit_cate_intercept=fit_cate_intercept,
                          discrete_treatment=discrete_treatment,
                          categories=categories,
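The features built by `_RandomFeatures` are the classic random Fourier features of Rahimi and Recht: with `omegas` drawn from N(0, 1/bw²) and `biases` uniform on [0, 2π), the inner product of two feature vectors approximates the RBF kernel exp(-||x - y||² / (2·bw²)), which is what lets the `ElasticNetCV` final stage mimic a kernel regression. A quick numerical check of that identity (variable names are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
d, dim, bw = 3, 50_000, 1.0
omegas = rng.normal(0, 1 / bw, size=(d, dim))
biases = rng.uniform(0, 2 * np.pi, size=(1, dim))

def featurize(X):
    return np.sqrt(2 / dim) * np.cos(X @ omegas + biases)

x = rng.normal(size=(1, d))
y = rng.normal(size=(1, d))
approx = float(featurize(x) @ featurize(y).T)                 # z(x) . z(y)
exact = float(np.exp(-np.sum((x - y) ** 2) / (2 * bw ** 2)))  # RBF kernel
print(approx, exact)  # agree to about 1e-2 at this dim
```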
(Diffs for the remaining 7 changed files are not expanded in this view.)
