Skip to content

Commit

Permalink
force_first_round_loss=True for SNPE-A
Browse files — browse the repository at this point in the history
  • Loading branch information
michaeldeistler committed Aug 23, 2022
1 parent b7f0e9e commit 6f7c4c3
Showing 1 changed file with 1 addition and 3 deletions.
4 changes: 1 addition & 3 deletions sbi/inference/snpe/snpe_a.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,6 @@ def train(
clip_max_norm: Optional[float] = 5.0,
calibration_kernel: Optional[Callable] = None,
resume_training: bool = False,
force_first_round_loss: bool = False,
retrain_from_scratch: bool = False,
show_train_summary: bool = False,
dataloader_kwargs: Optional[Dict] = None,
Expand Down Expand Up @@ -144,8 +143,6 @@ def train(
force_first_round_loss: If `True`, train with maximum likelihood,
i.e., potentially ignoring the correction for using a proposal
distribution different from the prior.
force_first_round_loss: If `True`, train with maximum likelihood,
regardless of the proposal distribution.
retrain_from_scratch: Whether to retrain the conditional density
estimator for the posterior from scratch each round. Not supported for
SNPE-A.
Expand Down Expand Up @@ -174,6 +171,7 @@ def train(

# SNPE-A always discards the prior samples.
kwargs["discard_prior_samples"] = True
kwargs["force_first_round_loss"] = True

self._round = max(self._data_round_index)

Expand Down

0 comments on commit 6f7c4c3

Please sign in to comment.