From 8e464b3db4418226a9a94751b198c6fc9aa98043 Mon Sep 17 00:00:00 2001
From: janfb
Date: Tue, 15 Feb 2022 11:20:09 +0100
Subject: [PATCH] remove train methods of snle children.

---
 sbi/inference/snle/mnle.py   | 50 ------------------------------------
 sbi/inference/snle/snle_a.py | 50 ------------------------------------
 2 files changed, 100 deletions(-)

diff --git a/sbi/inference/snle/mnle.py b/sbi/inference/snle/mnle.py
index 941d5f010..d49efe347 100644
--- a/sbi/inference/snle/mnle.py
+++ b/sbi/inference/snle/mnle.py
@@ -67,56 +67,6 @@ def __init__(
         kwargs = del_entries(locals(), entries=("self", "__class__", "unused_args"))
         super().__init__(**kwargs, **unused_args)
 
-    def train(
-        self,
-        training_batch_size: int = 50,
-        learning_rate: float = 5e-4,
-        validation_fraction: float = 0.1,
-        stop_after_epochs: int = 20,
-        max_num_epochs: int = 2**31 - 1,
-        clip_max_norm: Optional[float] = 5.0,
-        exclude_invalid_x: bool = True,
-        resume_training: bool = False,
-        discard_prior_samples: bool = False,
-        retrain_from_scratch: bool = False,
-        show_train_summary: bool = False,
-        dataloader_kwargs: Optional[Dict] = None,
-    ) -> NeuralPosterior:
-        r"""Return density estimator that approximates the distribution $p(x|\theta)$.
-
-        Args:
-            training_batch_size: Training batch size.
-            learning_rate: Learning rate for Adam optimizer.
-            validation_fraction: The fraction of data to use for validation.
-            stop_after_epochs: The number of epochs to wait for improvement on the
-                validation set before terminating training.
-            max_num_epochs: Maximum number of epochs to run. If reached, we stop
-                training even when the validation loss is still decreasing. If None, we
-                train until validation loss increases (see also `stop_after_epochs`).
-            clip_max_norm: Value at which to clip the total gradient norm in order to
-                prevent exploding gradients. Use None for no clipping.
-            exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`
-                during training. Expect errors, silent or explicit, when `False`.
-            resume_training: Can be used in case training time is limited, e.g. on a
-                cluster. If `True`, the split between train and validation set, the
-                optimizer, the number of epochs, and the best validation log-prob will
-                be restored from the last time `.train()` was called.
-            discard_prior_samples: Whether to discard samples simulated in round 1, i.e.
-                from the prior. Training may be sped up by ignoring such less targeted
-                samples.
-            retrain_from_scratch: Whether to retrain the conditional density
-                estimator for the posterior from scratch each round.
-            show_train_summary: Whether to print the number of epochs and validation
-                loss and leakage after the training.
-            dataloader_kwargs: Additional or updated kwargs to be passed to the training
-                and validation dataloaders (like, e.g., a collate_fn)
-
-        Returns:
-            Density estimator that approximates the distribution $p(x|\theta)$.
-        """
-        kwargs = del_entries(locals(), entries=("self", "__class__"))
-        return super().train(**kwargs)
-
     def build_posterior(
         self,
         density_estimator: Optional[TorchModule] = None,
diff --git a/sbi/inference/snle/snle_a.py b/sbi/inference/snle/snle_a.py
index 201ac8eb8..ff2962439 100644
--- a/sbi/inference/snle/snle_a.py
+++ b/sbi/inference/snle/snle_a.py
@@ -51,53 +51,3 @@ def __init__(
 
         kwargs = del_entries(locals(), entries=("self", "__class__"))
         super().__init__(**kwargs)
-
-    def train(
-        self,
-        training_batch_size: int = 50,
-        learning_rate: float = 5e-4,
-        validation_fraction: float = 0.1,
-        stop_after_epochs: int = 20,
-        max_num_epochs: int = 2**31 - 1,
-        clip_max_norm: Optional[float] = 5.0,
-        exclude_invalid_x: bool = True,
-        resume_training: bool = False,
-        discard_prior_samples: bool = False,
-        retrain_from_scratch: bool = False,
-        show_train_summary: bool = False,
-        dataloader_kwargs: Optional[Dict] = None,
-    ) -> flows.Flow:
-        r"""Return density estimator that approximates the distribution $p(x|\theta)$.
-
-        Args:
-            training_batch_size: Training batch size.
-            learning_rate: Learning rate for Adam optimizer.
-            validation_fraction: The fraction of data to use for validation.
-            stop_after_epochs: The number of epochs to wait for improvement on the
-                validation set before terminating training.
-            max_num_epochs: Maximum number of epochs to run. If reached, we stop
-                training even when the validation loss is still decreasing. Otherwise,
-                we train until validation loss increases (see also `stop_after_epochs`).
-            clip_max_norm: Value at which to clip the total gradient norm in order to
-                prevent exploding gradients. Use None for no clipping.
-            exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`
-                during training. Expect errors, silent or explicit, when `False`.
-            resume_training: Can be used in case training time is limited, e.g. on a
-                cluster. If `True`, the split between train and validation set, the
-                optimizer, the number of epochs, and the best validation log-prob will
-                be restored from the last time `.train()` was called.
-            discard_prior_samples: Whether to discard samples simulated in round 1, i.e.
-                from the prior. Training may be sped up by ignoring such less targeted
-                samples.
-            retrain_from_scratch: Whether to retrain the conditional density
-                estimator for the posterior from scratch each round.
-            show_train_summary: Whether to print the number of epochs and validation
-                loss and leakage after the training.
-            dataloader_kwargs: Additional or updated kwargs to be passed to the training
-                and validation dataloaders (like, e.g., a collate_fn)
-
-        Returns:
-            Density estimator that approximates the distribution $p(x|\theta)$.
-        """
-        kwargs = del_entries(locals(), entries=("self", "__class__"))
-        return super().train(**kwargs)