From cdf44ccd0ff8d3c6af5efb6fbeced9c49d5f81b7 Mon Sep 17 00:00:00 2001 From: jsvetter Date: Tue, 27 Aug 2024 12:18:19 +0200 Subject: [PATCH] feat: score-based density estimators for SBI (#1015) * Initial draft for Neural Posterior Score Estimation (NPSE) * Rename NSPE->NPSE and Geffner->iid_bridge * new structure for potentials and posteriors * add support for MLP denoiser with ada_ln conditioning * fixup for `log_prob()` of score matching methods * fixed tutorial link in README and wip for fmpe+npse tutorial * better argument handling for score nets * finished NPSE tutorial, added calls to tut 16-implemented methods, and fixed some docstrings * small fixes, docstrings, import sorting. * add ode sampling via zuko * undo potential fix for iid sampling * add errors for MAP and iid data, adapt tests * Remove kernels; remove correctors; remove ddim predictor; rename some symbols * remove file that did not contain tests * fewer tests for npse * C2ST tests pass by putting _converged back in * Improve documentation and docstrings * removing ddim functions * remove unreachable code * consistent default kwargs * Remove iid_bridge (to be left for a future PR) * Add options to docstring * consistent use of loss/log_prob in inference methods * Add citation for AdaMLP * docs: add fmpe to tutorials, fix docstrings --------- Co-authored-by: rdgao-lajolla Co-authored-by: michaeldeistler Co-authored-by: Jan Boelts Co-authored-by: manuelgloeckler Co-authored-by: Guy Moss --- sbi/analysis/tensorboard_output.py | 2 +- sbi/inference/__init__.py | 1 + sbi/inference/base.py | 42 +- sbi/inference/fmpe/fmpe_base.py | 13 +- sbi/inference/npse/__init__.py | 1 + sbi/inference/npse/npse.py | 550 +++++++++++++++ sbi/inference/posteriors/base_posterior.py | 50 +- sbi/inference/posteriors/direct_posterior.py | 4 +- sbi/inference/posteriors/mcmc_posterior.py | 2 +- sbi/inference/posteriors/score_posterior.py | 367 ++++++++++ sbi/inference/potentials/base_potential.py | 5 + .../potentials/likelihood_based_potential.py | 4 +- .../potentials/posterior_based_potential.py | 4 +- .../potentials/score_based_potential.py | 231 +++++++ sbi/inference/snle/snle_base.py | 24 +- sbi/inference/snpe/snpe_a.py | 2 +- sbi/inference/snpe/snpe_base.py | 24 +- sbi/inference/snpe/snpe_c.py | 2 +- sbi/inference/snre/snre_base.py | 20 +- sbi/neural_nets/__init__.py | 2 +- sbi/neural_nets/categorial.py | 2 +- .../density_estimators/__init__.py | 13 - sbi/neural_nets/embedding_nets.py | 16 + sbi/neural_nets/estimators/__init__.py | 10 + .../base.py | 41 ++ .../categorical_net.py | 5 +- .../flowmatching_estimator.py | 2 +- .../mixed_density_estimator.py | 4 +- .../nflows_flow.py | 2 +- sbi/neural_nets/estimators/score_estimator.py | 654 ++++++++++++++++++ .../shape_handling.py | 0 .../zuko_flow.py | 2 +- sbi/neural_nets/factory.py | 94 ++- sbi/neural_nets/flow.py | 3 +- sbi/neural_nets/flow_matcher.py | 2 +- sbi/neural_nets/mdn.py | 2 +- sbi/neural_nets/mnle.py | 4 +- sbi/neural_nets/score_nets.py | 376 ++++++++++ sbi/samplers/score/correctors.py | 65 ++ sbi/samplers/score/predictors.py | 122 ++++ sbi/samplers/score/score.py | 160 +++++ sbi/simulators/linear_gaussian.py | 2 +- sbi/utils/__init__.py | 1 + sbi/utils/metrics.py | 2 + sbi/utils/user_input_checks.py | 5 +- tests/density_estimator_test.py | 4 +- tests/lc2st_test.py | 2 +- tests/linearGaussian_npse_test.py | 237 +++++++ tests/linearGaussian_snpe_test.py | 5 +- tests/posterior_nn_test.py | 8 +- tests/sbc_test.py | 28 +- tests/score_estimator_test.py | 146 ++++ tests/score_samplers_test.py 
| 82 +++ tests/test_utils.py | 1 - tutorials/16_implemented_methods.ipynb | 175 +++-- .../19_flowmatching_and_scorematching.ipynb | 338 +++++++++ 56 files changed, 3764 insertions(+), 201 deletions(-) create mode 100644 sbi/inference/npse/__init__.py create mode 100644 sbi/inference/npse/npse.py create mode 100644 sbi/inference/posteriors/score_posterior.py create mode 100644 sbi/inference/potentials/score_based_potential.py delete mode 100644 sbi/neural_nets/density_estimators/__init__.py create mode 100644 sbi/neural_nets/estimators/__init__.py rename sbi/neural_nets/{density_estimators => estimators}/base.py (85%) rename sbi/neural_nets/{density_estimators => estimators}/categorical_net.py (96%) rename sbi/neural_nets/{density_estimators => estimators}/flowmatching_estimator.py (98%) rename sbi/neural_nets/{density_estimators => estimators}/mixed_density_estimator.py (98%) rename sbi/neural_nets/{density_estimators => estimators}/nflows_flow.py (98%) create mode 100644 sbi/neural_nets/estimators/score_estimator.py rename sbi/neural_nets/{density_estimators => estimators}/shape_handling.py (100%) rename sbi/neural_nets/{density_estimators => estimators}/zuko_flow.py (98%) create mode 100644 sbi/neural_nets/score_nets.py create mode 100644 sbi/samplers/score/correctors.py create mode 100644 sbi/samplers/score/predictors.py create mode 100644 sbi/samplers/score/score.py create mode 100644 tests/linearGaussian_npse_test.py create mode 100644 tests/score_estimator_test.py create mode 100644 tests/score_samplers_test.py create mode 100644 tutorials/19_flowmatching_and_scorematching.ipynb diff --git a/sbi/analysis/tensorboard_output.py b/sbi/analysis/tensorboard_output.py index fc32ecf1b..447158de2 100644 --- a/sbi/analysis/tensorboard_output.py +++ b/sbi/analysis/tensorboard_output.py @@ -61,7 +61,7 @@ def plot_summary( logger = logging.getLogger(__name__) if tags is None: - tags = ["validation_log_probs"] + tags = ["validation_loss"] size_guidance = deepcopy(DEFAULT_SIZE_GUIDANCE) size_guidance.update(scalars=tensorboard_scalar_limit) diff --git a/sbi/inference/__init__.py b/sbi/inference/__init__.py index 4cf8210d1..f1275e392 100644 --- a/sbi/inference/__init__.py +++ b/sbi/inference/__init__.py @@ -6,6 +6,7 @@ simulate_for_sbi, ) from sbi.inference.fmpe import FMPE +from sbi.inference.npse.npse import NPSE from sbi.inference.snle import MNLE, SNLE_A from sbi.inference.snpe import SNPE_A, SNPE_B, SNPE_C # noqa: F401 from sbi.inference.snre import BNRE, SNRE, SNRE_A, SNRE_B, SNRE_C # noqa: F401 diff --git a/sbi/inference/base.py b/sbi/inference/base.py index 2072420b8..b4ca20f99 100644 --- a/sbi/inference/base.py +++ b/sbi/inference/base.py @@ -176,7 +176,7 @@ def __init__( self._data_round_index = [] self._round = 0 - self._val_log_prob = float("-Inf") + self._val_loss = float("Inf") # XXX We could instantiate here the Posterior for all children. Two problems: # 1. We must dispatch to right PotentialProvider for mcmc based on name @@ -190,9 +190,9 @@ def __init__( # Logging during training (by SummaryWriter). self._summary = dict( epochs_trained=[], - best_validation_log_prob=[], - validation_log_probs=[], - training_log_probs=[], + best_validation_loss=[], + validation_loss=[], + training_loss=[], epoch_durations_sec=[], ) @@ -393,8 +393,8 @@ def _converged(self, epoch: int, stop_after_epochs: int) -> bool: neural_net = self._neural_net # (Re)-start the epoch count with the first epoch or any improvement. 
- if epoch == 0 or self._val_log_prob > self._best_val_log_prob: - self._best_val_log_prob = self._val_log_prob + if epoch == 0 or self._val_loss < self._best_val_loss: + self._best_val_loss = self._val_loss self._epochs_since_last_improvement = 0 self._best_model_state_dict = deepcopy(neural_net.state_dict()) else: @@ -419,14 +419,14 @@ def _default_summary_writer(self) -> SummaryWriter: @staticmethod def _describe_round(round_: int, summary: Dict[str, list]) -> str: epochs = summary["epochs_trained"][-1] - best_validation_log_prob = summary["best_validation_log_prob"][-1] + best_validation_loss = summary["best_validation_loss"][-1] description = f""" ------------------------- ||||| ROUND {round_ + 1} STATS |||||: ------------------------- Epochs trained: {epochs} - Best validation performance: {best_validation_log_prob:.4f} + Best validation performance: {best_validation_loss:.4f} ------------------------- """ @@ -472,12 +472,12 @@ def _summarize( Scalar tags: - epochs_trained: number of epochs trained - - best_validation_log_prob: - best validation log prob (for each round). - - validation_log_probs: - validation log probs for every epoch (for each round). - - training_log_probs - training log probs for every epoch (for each round). + - best_validation_loss: + best validation loss (for each round). + - validation_loss: + validation loss for every epoch (for each round). + - training_loss + training loss for every epoch (for each round). - epoch_durations_sec epoch duration for every epoch (for each round) @@ -491,28 +491,28 @@ def _summarize( ) self._summary_writer.add_scalar( - tag="best_validation_log_prob", - scalar_value=self._summary["best_validation_log_prob"][-1], + tag="best_validation_loss", + scalar_value=self._summary["best_validation_loss"][-1], global_step=round_ + 1, ) - # Add validation log prob for every epoch. + # Add validation loss for every epoch. # Offset with all previous epochs. offset = ( torch.tensor(self._summary["epochs_trained"][:-1], dtype=torch.int) .sum() .item() ) - for i, vlp in enumerate(self._summary["validation_log_probs"][offset:]): + for i, vlp in enumerate(self._summary["validation_loss"][offset:]): self._summary_writer.add_scalar( - tag="validation_log_probs", + tag="validation_loss", scalar_value=vlp, global_step=offset + i, ) - for i, tlp in enumerate(self._summary["training_log_probs"][offset:]): + for i, tlp in enumerate(self._summary["training_loss"][offset:]): self._summary_writer.add_scalar( - tag="training_log_probs", + tag="training_loss", scalar_value=tlp, global_step=offset + i, ) diff --git a/sbi/inference/fmpe/fmpe_base.py b/sbi/inference/fmpe/fmpe_base.py index d17471d53..043a064ae 100644 --- a/sbi/inference/fmpe/fmpe_base.py +++ b/sbi/inference/fmpe/fmpe_base.py @@ -241,8 +241,7 @@ def train( self.epoch += 1 train_loss_average = train_loss_sum / len(train_loader) # type: ignore - # TODO: rename to loss once renaming is done in base class. - self._summary["training_log_probs"].append(-train_loss_average) + self._summary["training_loss"].append(train_loss_average) # Calculate validation performance. self._neural_net.eval() @@ -262,11 +261,8 @@ def train( self._val_loss = val_loss_sum / ( len(val_loader) * val_loader.batch_size # type: ignore ) - # TODO: remove this once renaming to loss in base class is done. - self._val_log_prob = -self._val_loss - # Log validation log prob for every epoch. - # TODO: rename to loss and fix sign once renaming in base is done. 
- self._summary["validation_log_probs"].append(-self._val_loss) + # Log validation loss for every epoch. + self._summary["validation_loss"].append(self._val_loss) self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time) self._maybe_show_progress(self._show_progress_bars, self.epoch) @@ -275,8 +271,7 @@ def train( # Update summary. self._summary["epochs_trained"].append(self.epoch) - # TODO: rename to loss once renaming is done in base class. - self._summary["best_validation_log_prob"].append(self._best_val_log_prob) + self._summary["best_validation_loss"].append(self._best_val_loss) # Update tensorboard and summary dict. self._summarize(round_=self._round) diff --git a/sbi/inference/npse/__init__.py b/sbi/inference/npse/__init__.py new file mode 100644 index 000000000..f861c3450 --- /dev/null +++ b/sbi/inference/npse/__init__.py @@ -0,0 +1 @@ +from sbi.inference.npse.npse import NPSE diff --git a/sbi/inference/npse/npse.py b/sbi/inference/npse/npse.py new file mode 100644 index 000000000..fbbe4fcd9 --- /dev/null +++ b/sbi/inference/npse/npse.py @@ -0,0 +1,550 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see +import time +from copy import deepcopy +from typing import Any, Callable, Optional, Union + +import torch +from torch import Tensor, ones +from torch.distributions import Distribution +from torch.nn.utils.clip_grad import clip_grad_norm_ +from torch.optim.adam import Adam +from torch.utils.tensorboard.writer import SummaryWriter + +from sbi import utils as utils +from sbi.inference import NeuralInference +from sbi.inference.posteriors import ( + DirectPosterior, +) +from sbi.inference.posteriors.score_posterior import ScorePosterior +from sbi.neural_nets.estimators.score_estimator import ConditionalScoreEstimator +from sbi.neural_nets.factory import posterior_score_nn +from sbi.utils import ( + check_estimator_arg, + handle_invalid_x, + npe_msg_on_invalid_x, + test_posterior_net_for_multi_d_x, + validate_theta_and_x, + warn_if_zscoring_changes_data, + x_shape_from_simulation, +) +from sbi.utils.sbiutils import ImproperEmpirical, mask_sims_from_prior + + +class NPSE(NeuralInference): + def __init__( + self, + prior: Optional[Distribution] = None, + score_estimator: Union[str, Callable] = "mlp", + sde_type: str = "ve", + device: str = "cpu", + logging_level: Union[int, str] = "WARNING", + summary_writer: Optional[SummaryWriter] = None, + show_progress_bars: bool = True, + **kwargs, + ): + """Base class for Neural Posterior Score Estimation methods. + + Instead of performing conditonal *density* estimation, NPSE methods perform + conditional *score* estimation i.e. they estimate the gradient of the log + density using denoising score matching loss. + + NOTE: NPSE does not support multi-round inference with flexible proposals yet. + You can try to run multi-round with truncated proposals, but note that this is + not tested yet. + + Args: + prior: Prior distribution. + score_estimator: Neural network architecture for the score estimator. Can be + a string (e.g. 'mlp' or 'ada_mlp') or a callable that returns a neural + network. + sde_type: Type of SDE to use. Must be one of ['vp', 've', 'subvp']. + device: Device to run the training on. + logging_level: Logging level for the training. Can be an integer or a + string. + summary_writer: Tensorboard summary writer. + show_progress_bars: Whether to show progress bars during training. + kwargs: Additional keyword arguments. 
+ + References: + - Geffner, Tomas, George Papamakarios, and Andriy Mnih. "Score modeling for + simulation-based inference." ICML 2023. + - Sharrock, Louis, et al. "Sequential neural score estimation: Likelihood- + free inference with conditional score based diffusion models." ICML 2024. + """ + + super().__init__( + prior=prior, + device=device, + logging_level=logging_level, + summary_writer=summary_writer, + show_progress_bars=show_progress_bars, + ) + + # As detailed in the docstring, `score_estimator` is either a string or + # a callable. The function creating the neural network is attached to + # `_build_neural_net`. It will be called in the first round and receive + # thetas and xs as inputs, so that they can be used for shape inference and + # potentially for z-scoring. + check_estimator_arg(score_estimator) + if isinstance(score_estimator, str): + self._build_neural_net = posterior_score_nn( + sde_type=sde_type, score_net_type=score_estimator, **kwargs + ) + else: + self._build_neural_net = score_estimator + + self._proposal_roundwise = [] + + def append_simulations( + self, + theta: Tensor, + x: Tensor, + proposal: Optional[DirectPosterior] = None, + exclude_invalid_x: Optional[bool] = None, + data_device: Optional[str] = None, + ) -> "NPSE": + r"""Store parameters and simulation outputs to use them for later training. + + Data are stored as entries in lists for each type of variable (parameter/data). + + Stores $\theta$, $x$, prior_masks (indicating if simulations are coming from the + prior or not) and an index indicating which round the batch of simulations came + from. + + Args: + theta: Parameter sets. + x: Simulation outputs. + proposal: The distribution that the parameters $\theta$ were sampled from. + Pass `None` if the parameters were sampled from the prior. If not + `None`, it will trigger a different loss-function. + exclude_invalid_x: Whether invalid simulations are discarded during + training. For single-round SNPE, it is fine to discard invalid + simulations, but for multi-round SNPE (atomic), discarding invalid + simulations gives systematically wrong results. If `None`, it will + be `True` in the first round and `False` in later rounds. + data_device: Where to store the data, default is on the same device where + the training is happening. If training a large dataset on a GPU with not + much VRAM can set to 'cpu' to store data on system memory instead. + + Returns: + NeuralInference object (returned so that this function is chainable). + """ + assert ( + proposal is None + ), "Multi-round NPSE is not yet implemented. Please use single-round NPSE." 
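For orientation, a minimal single-round usage sketch of the NPSE API introduced above; the uniform prior and the linear-Gaussian stand-in simulator are illustrative placeholders, not part of the patch:

import torch
from sbi.inference import NPSE
from sbi.utils import BoxUniform

# Toy problem: infer a 3-d mean from noisy observations (placeholder simulator).
prior = BoxUniform(low=-2 * torch.ones(3), high=2 * torch.ones(3))
theta = prior.sample((2000,))
x = theta + 0.1 * torch.randn_like(theta)

inference = NPSE(prior=prior, score_estimator="mlp", sde_type="ve")
inference.append_simulations(theta, x).train()
posterior = inference.build_posterior(sample_with="sde")
posterior.set_default_x(x[:1])          # condition on one observation
samples = posterior.sample((1000,))     # draws via the reverse diffusion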
+ current_round = 0 + + if exclude_invalid_x is None: + exclude_invalid_x = current_round == 0 + + if data_device is None: + data_device = self._device + + theta, x = validate_theta_and_x( + theta, x, data_device=data_device, training_device=self._device + ) + + is_valid_x, num_nans, num_infs = handle_invalid_x( + x, exclude_invalid_x=exclude_invalid_x + ) + + x = x[is_valid_x] + theta = theta[is_valid_x] + + # Check for problematic z-scoring + warn_if_zscoring_changes_data(x) + + npe_msg_on_invalid_x(num_nans, num_infs, exclude_invalid_x, "Single-round NPE") + + self._data_round_index.append(current_round) + prior_masks = mask_sims_from_prior(int(current_round > 0), theta.size(0)) + + self._theta_roundwise.append(theta) + self._x_roundwise.append(x) + self._prior_masks.append(prior_masks) + + self._proposal_roundwise.append(proposal) + + if self._prior is None or isinstance(self._prior, ImproperEmpirical): + theta_prior = self.get_simulations()[0].to(self._device) + self._prior = ImproperEmpirical( + theta_prior, ones(theta_prior.shape[0], device=self._device) + ) + + return self + + def train( + self, + training_batch_size: int = 200, + learning_rate: float = 5e-4, + validation_fraction: float = 0.1, + stop_after_epochs: int = 200, + max_num_epochs: int = 2**31 - 1, + clip_max_norm: Optional[float] = 5.0, + calibration_kernel: Optional[Callable] = None, + ema_loss_decay: float = 0.1, + resume_training: bool = False, + force_first_round_loss: bool = False, + discard_prior_samples: bool = False, + retrain_from_scratch: bool = False, + show_train_summary: bool = False, + dataloader_kwargs: Optional[dict] = None, + ) -> ConditionalScoreEstimator: + r"""Returns a score estimator that approximates the score + $\nabla_\theta \log p(\theta|x)$. + + Args: + training_batch_size: Training batch size. + learning_rate: Learning rate for Adam optimizer. + validation_fraction: The fraction of data to use for validation. + stop_after_epochs: The number of epochs to wait for improvement on the + validation set before terminating training. + max_num_epochs: Maximum number of epochs to run. If reached, we stop + training even when the validation loss is still decreasing. Otherwise, + we train until validation loss increases (see also `stop_after_epochs`). + clip_max_norm: Value at which to clip the total gradient norm in order to + prevent exploding gradients. Use None for no clipping. + calibration_kernel: A function to calibrate the loss with respect + to the simulations `x` (optional). See Lueckmann, Gonçalves et al., + NeurIPS 2017. If `None`, no calibration is used. + resume_training: Can be used in case training time is limited, e.g. on a + cluster. If `True`, the split between train and validation set, the + optimizer, the number of epochs, and the best validation log-prob will + be restored from the last time `.train()` was called. + force_first_round_loss: If `True`, train with maximum likelihood, + i.e., potentially ignoring the correction for using a proposal + distribution different from the prior. + discard_prior_samples: Whether to discard samples simulated in round 1, i.e. + from the prior. Training may be sped up by ignoring such less targeted + samples. + retrain_from_scratch: Whether to retrain the conditional density + estimator for the posterior from scratch each round. + show_train_summary: Whether to print the number of epochs and validation + loss after the training. 
+ dataloader_kwargs: Additional or updated kwargs to be passed to the training + and validation dataloaders (like, e.g., a collate_fn) + + Returns: + Score estimator that approximates the posterior score. + """ + # Load data from most recent round. + self._round = max(self._data_round_index) + + if self._round == 0 and self._neural_net is not None: + assert force_first_round_loss or resume_training, ( + "You have already trained this neural network. After you had trained " + "the network, you again appended simulations with `append_simulations" + "(theta, x)`, but you did not provide a proposal. If the new " + "simulations are sampled from the prior, you can set " + "`.train(..., force_first_round_loss=True`). However, if the new " + "simulations were not sampled from the prior, you should pass the " + "proposal, i.e. `append_simulations(theta, x, proposal)`. If " + "your samples are not sampled from the prior and you do not pass a " + "proposal and you set `force_first_round_loss=True`, the result of " + "NPSE will not be the true posterior. Instead, it will be the proposal " + "posterior, which (usually) is more narrow than the true posterior." + ) + + # Calibration kernels proposed in Lueckmann, Gonçalves et al., 2017. + if calibration_kernel is None: + + def default_calibration_kernel(x): + return ones([len(x)], device=self._device) + + calibration_kernel = default_calibration_kernel + + # Starting index for the training set (1 = discard round-0 samples). + start_idx = int(discard_prior_samples and self._round > 0) + + # Set the proposal to the last proposal that was passed by the user. For + # atomic SNPE, it does not matter what the proposal is. For non-atomic + # SNPE, we only use the latest data that was passed, i.e. the one from the + # last proposal. + proposal = self._proposal_roundwise[-1] + + train_loader, val_loader = self.get_dataloaders( + start_idx, + training_batch_size, + validation_fraction, + resume_training, + dataloader_kwargs=dataloader_kwargs, + ) + # First round or if retraining from scratch: + # Call the `self._build_neural_net` with the rounds' thetas and xs as + # arguments, which will build the neural network. + if self._neural_net is None or retrain_from_scratch: + # Get theta,x to initialize NN + theta, x, _ = self.get_simulations(starting_round=start_idx) + # Use only training data for building the neural net (z-scoring transforms) + + self._neural_net = self._build_neural_net( + theta[self.train_indices].to("cpu"), + x[self.train_indices].to("cpu"), + ) + self._x_shape = x_shape_from_simulation(x.to("cpu")) + + test_posterior_net_for_multi_d_x( + self._neural_net, + theta.to("cpu"), + x.to("cpu"), + ) + + del theta, x + + # Move entire net to device for training. + self._neural_net.to(self._device) + + if not resume_training: + self.optimizer = Adam(list(self._neural_net.parameters()), lr=learning_rate) + + self.epoch, self._val_loss = 0, float("Inf") + + while self.epoch <= max_num_epochs and not self._converged( + self.epoch, stop_after_epochs + ): + # Train for a single epoch. + self._neural_net.train() + train_loss_sum = 0 + epoch_start_time = time.time() + for batch in train_loader: + self.optimizer.zero_grad() + # Get batches on current device. 
+ theta_batch, x_batch, masks_batch = ( + batch[0].to(self._device), + batch[1].to(self._device), + batch[2].to(self._device), + ) + + train_losses = self._loss( + theta_batch, + x_batch, + masks_batch, + proposal, + calibration_kernel, + force_first_round_loss=force_first_round_loss, + ) + + train_loss = torch.mean(train_losses) + + train_loss_sum += train_losses.sum().item() + + train_loss.backward() + if clip_max_norm is not None: + clip_grad_norm_( + self._neural_net.parameters(), max_norm=clip_max_norm + ) + self.optimizer.step() + + self.epoch += 1 + + train_loss_average = train_loss_sum / ( + len(train_loader) * train_loader.batch_size # type: ignore + ) + + # NOTE: Due to the inherently noisy nature we do instead log a exponential + # moving average of the training loss. + if len(self._summary["training_loss"]) == 0: + self._summary["training_loss"].append(train_loss_average) + else: + previous_loss = self._summary["training_loss"][-1] + self._summary["training_loss"].append( + (1.0 - ema_loss_decay) * previous_loss + + ema_loss_decay * train_loss_average + ) + + # Calculate validation performance. + self._neural_net.eval() + val_loss_sum = 0 + + with torch.no_grad(): + for batch in val_loader: + theta_batch, x_batch, masks_batch = ( + batch[0].to(self._device), + batch[1].to(self._device), + batch[2].to(self._device), + ) + # Take negative loss here to get validation log_prob. + val_losses = self._loss( + theta_batch, + x_batch, + masks_batch, + proposal, + calibration_kernel, + force_first_round_loss=force_first_round_loss, + ) + val_loss_sum += val_losses.sum().item() + + # Take mean over all validation samples. + val_loss = val_loss_sum / ( + len(val_loader) * val_loader.batch_size # type: ignore + ) + + # NOTE: Due to the inherently noisy nature we do instead log a exponential + # moving average of the validation loss. + if len(self._summary["validation_loss"]) == 0: + val_loss_ema = val_loss + else: + previous_loss = self._summary["validation_loss"][-1] + val_loss_ema = ( + 1 - ema_loss_decay + ) * previous_loss + ema_loss_decay * val_loss + + self._val_loss = val_loss_ema + self._summary["validation_loss"].append(self._val_loss) + self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time) + + self._maybe_show_progress(self._show_progress_bars, self.epoch) + + self._report_convergence_at_end(self.epoch, stop_after_epochs, max_num_epochs) + + # Update summary. + self._summary["epochs_trained"].append(self.epoch) + self._summary["best_validation_loss"].append(self._val_loss) + + # Update tensorboard and summary dict. + self._summarize(round_=self._round) + + # Update description for progress bar. + if show_train_summary: + print(self._describe_round(self._round, self._summary)) + + # Avoid keeping the gradients in the resulting network, which can + # cause memory leakage when benchmarking. + self._neural_net.zero_grad(set_to_none=True) + + return deepcopy(self._neural_net) + + def build_posterior( + self, + score_estimator: Optional[ConditionalScoreEstimator] = None, + prior: Optional[Distribution] = None, + sample_with: str = "sde", + ) -> ScorePosterior: + r"""Build posterior from the score estimator. + + For NPSE, the posterior distribution that is returned here implements the + following functionality over the raw neural density estimator: + - correct the calculation of the log probability such that it compensates for + the leakage. + - reject samples that lie outside of the prior bounds. 
+
+        Args:
+            score_estimator: The score estimator that the posterior is based on.
+                If `None`, use the latest neural score estimator that was trained.
+            prior: Prior distribution.
+            sample_with: Method to use for sampling from the posterior. Can be one of
+                'sde' (default) or 'ode'. The 'sde' method uses the score to
+                do a Langevin diffusion step, while the 'ode' method uses the score to
+                define a probabilistic ODE and solves it with a numerical ODE solver.
+
+        Returns:
+            Posterior $p(\theta|x)$ with `.sample()` and `.log_prob()` methods.
+        """
+        if prior is None:
+            assert self._prior is not None, (
+                "You did not pass a prior. You have to pass the prior either at "
+                "initialization `inference = NPSE(prior)` or to "
+                "`.build_posterior(prior=prior)`."
+            )
+            prior = self._prior
+        else:
+            utils.check_prior(prior)
+
+        if score_estimator is None:
+            score_estimator = self._neural_net
+            # If the internal net is used, the device is already defined.
+            device = self._device
+        # Otherwise, infer it from the device of the net parameters.
+        else:
+            # TODO: Add protocol for checking if the score estimator has forward and
+            # loss methods with the correct signature.
+            device = str(next(score_estimator.parameters()).device)
+
+        posterior = ScorePosterior(
+            score_estimator,  # type: ignore
+            prior,
+            device=device,
+            sample_with=sample_with,
+        )
+
+        self._posterior = posterior
+        # Store models at end of each round.
+        self._model_bank.append(deepcopy(self._posterior))
+
+        return deepcopy(self._posterior)
+
+    def _loss_proposal_posterior(
+        self,
+        theta: Tensor,
+        x: Tensor,
+        masks: Tensor,
+        proposal: Optional[Any],
+    ) -> Tensor:
+        raise NotImplementedError("Multi-round NPSE is not yet implemented.")
+
+    def _loss(
+        self,
+        theta: Tensor,
+        x: Tensor,
+        masks: Tensor,
+        proposal: Optional[Any],
+        calibration_kernel: Callable,
+        force_first_round_loss: bool = False,
+    ) -> Tensor:
+        """Return the loss from the score estimator. Currently, only single-round
+        NPSE is implemented, i.e., no proposal correction is applied for later rounds.
+
+        The loss is the denoising score matching loss, weighted by the calibration
+        kernel.
+
+        Args:
+            force_first_round_loss: If `True`, train with maximum likelihood,
+                i.e., potentially ignoring the correction for using a proposal
+                distribution different from the prior.
+
+        Returns:
+            Calibration kernel-weighted denoising score matching loss.
+        """
+        if self._round == 0 or force_first_round_loss:
+            # First round loss.
+            loss = self._neural_net.loss(theta, x)
+        else:
+            raise NotImplementedError(
+                "Multi-round NPSE with arbitrary proposals is not implemented"
+            )
+
+        return calibration_kernel(x) * loss
+
+    def _converged(self, epoch: int, stop_after_epochs: int) -> bool:
+        """Check if training has converged.
+
+        Unlike the `._converged` method in base.py, this method does not reset to the
+        best model. We noticed that this improves performance. Deleting this method
+        will make C2ST tests fail. This is because the loss is very stochastic, so
+        resetting might reset to an underfitted model. Ideally, we would write a
+        custom `._converged()` method which checks whether the loss is still going
+        down **for all t**.
+
+        Args:
+            epoch: Current epoch.
+            stop_after_epochs: Number of epochs to wait for improvement on the
+                validation set before terminating training.
+
+        Returns:
+            Whether training has converged.
+        """
+        converged = False
+
+        # No checkpointing, just check if the validation loss has improved.
+ + # (Re)-start the epoch count with the first epoch or any improvement. + if epoch == 0 or self._val_loss < self._best_val_loss: + self._best_val_loss = self._val_loss + self._epochs_since_last_improvement = 0 + else: + self._epochs_since_last_improvement += 1 + + # If no validation improvement over many epochs, stop training. + if self._epochs_since_last_improvement > stop_after_epochs - 1: + converged = True + + return converged diff --git a/sbi/inference/posteriors/base_posterior.py b/sbi/inference/posteriors/base_posterior.py index a4b9d49fa..3d810cc5f 100644 --- a/sbi/inference/posteriors/base_posterior.py +++ b/sbi/inference/posteriors/base_posterior.py @@ -2,7 +2,7 @@ # under the Apache License Version 2.0, see import inspect -from abc import ABC, abstractmethod +from abc import abstractmethod from typing import Any, Callable, Dict, Optional, Union from warnings import warn @@ -20,7 +20,7 @@ from sbi.utils.user_input_checks import process_x -class NeuralPosterior(ABC): +class NeuralPosterior: r"""Posterior $p(\theta|x)$ with `log_prob()` and `sample()` methods.

All inference methods in sbi train a neural network which is then used to obtain the posterior distribution. The `NeuralPosterior` class wraps the trained network @@ -52,6 +52,7 @@ def __init__( stacklevel=2, ) + # Wrap as `CallablePotentialWrapper` if `potential_fn` is a Callable. if not isinstance(potential_fn, BasePotential): kwargs_of_callable = list(inspect.signature(potential_fn).parameters.keys()) for key in ["theta", "x_o"]: @@ -191,7 +192,6 @@ def _calculate_map( show_progress_bars: bool = False, ) -> Tensor: """Calculates the maximum-a-posteriori estimate (MAP). - See `map()` method of child classes for docstring. """ @@ -215,7 +215,6 @@ def _calculate_map( show_progress_bars=show_progress_bars, )[0] - @abstractmethod def map( self, x: Optional[Tensor] = None, @@ -228,11 +227,44 @@ def map( show_progress_bars: bool = False, force_update: bool = False, ) -> Tensor: - """Returns stored maximum-a-posterior estimate (MAP), otherwise calculates it. + r"""Returns the maximum-a-posteriori estimate (MAP). - See child classes for docstring. - """ + The MAP is obtained by running gradient + ascent from a given number of starting positions (samples from the posterior + with the highest log-probability). After the optimization is done, we select the + parameter set that has the highest log-probability after the optimization. + + Warning: The default values used by this function are not well-tested. They + might require hand-tuning for the problem at hand. + + For developers: if the prior is a `BoxUniform`, we carry out the optimization + in unbounded space and transform the result back into bounded space. + Args: + x: Deprecated - use `.set_default_x()` prior to `.map()`. + num_iter: Number of optimization steps that the algorithm takes + to find the MAP. + num_to_optimize: From the drawn `num_init_samples`, use the + `num_to_optimize` with highest log-probability as the initial points + for the optimization. + learning_rate: Learning rate of the optimizer. + init_method: How to select the starting parameters for the optimization. If + it is a string, it can be either [`posterior`, `prior`], which samples + the respective distribution `num_init_samples` times. If it is a + tensor, the tensor will be used as init locations. + num_init_samples: Draw this number of samples from the posterior and + evaluate the log-probability of all of them. + save_best_every: The best log-probability is computed, saved in the + `map`-attribute, and printed every `save_best_every`-th iteration. + Computing the best log-probability creates a significant overhead + for score-based estimators (thus, the default is `1000`.) + show_progress_bars: Whether to show a progressbar during sampling from + the posterior. + force_update: Whether to re-calculate the MAP when x is unchanged and + have a cached value. + Returns: + The MAP estimate. + """ if x is not None: raise ValueError( "Passing `x` directly to `.map()` has been deprecated." @@ -266,10 +298,8 @@ def __repr__(self): def __str__(self): desc = ( - f"Posterior conditional density p(θ|x) of type {self.__class__.__name__}. " - f"{self._purpose}" + f"Posterior p(θ|x) of type {self.__class__.__name__}. 
" f"{self._purpose}" ) - return desc def __getstate__(self) -> Dict: diff --git a/sbi/inference/posteriors/direct_posterior.py b/sbi/inference/posteriors/direct_posterior.py index 46d56b77d..76b9fdf48 100644 --- a/sbi/inference/posteriors/direct_posterior.py +++ b/sbi/inference/posteriors/direct_posterior.py @@ -11,8 +11,8 @@ from sbi.inference.potentials.posterior_based_potential import ( posterior_estimator_based_potential, ) -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_batch_event, reshape_to_sample_batch_event, ) diff --git a/sbi/inference/posteriors/mcmc_posterior.py b/sbi/inference/posteriors/mcmc_posterior.py index e150819bb..65f59b95c 100644 --- a/sbi/inference/posteriors/mcmc_posterior.py +++ b/sbi/inference/posteriors/mcmc_posterior.py @@ -21,7 +21,7 @@ from sbi.inference.posteriors.base_posterior import NeuralPosterior from sbi.inference.potentials.base_potential import BasePotential -from sbi.neural_nets.density_estimators.shape_handling import reshape_to_batch_event +from sbi.neural_nets.estimators.shape_handling import reshape_to_batch_event from sbi.samplers.mcmc import ( IterateParameters, PyMCSampler, diff --git a/sbi/inference/posteriors/score_posterior.py b/sbi/inference/posteriors/score_posterior.py new file mode 100644 index 000000000..d689f989f --- /dev/null +++ b/sbi/inference/posteriors/score_posterior.py @@ -0,0 +1,367 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see + +from typing import Dict, Optional, Union + +import torch +from torch import Tensor +from torch.distributions import Distribution + +from sbi.inference.posteriors.base_posterior import NeuralPosterior +from sbi.inference.potentials.score_based_potential import ( + PosteriorScoreBasedPotential, + score_estimator_based_potential, +) +from sbi.neural_nets.estimators.score_estimator import ConditionalScoreEstimator +from sbi.neural_nets.estimators.shape_handling import ( + reshape_to_batch_event, +) +from sbi.samplers.score.correctors import Corrector +from sbi.samplers.score.predictors import Predictor +from sbi.samplers.score.score import Diffuser +from sbi.sbi_types import Shape +from sbi.utils import check_prior +from sbi.utils.torchutils import ensure_theta_batched + + +class ScorePosterior(NeuralPosterior): + r"""Posterior $p(\theta|x_o)$ with `log_prob()` and `sample()` methods. It samples + from the diffusion model given the score_estimator and rejects samples that lie + outside of the prior bounds. + + The posterior is defined by a score estimator and a prior. The score estimator + provides the gradient of the log-posterior with respect to the parameters. The prior + is used to reject samples that lie outside of the prior bounds. + + Sampling is done by running a diffusion process with a predictor and optionally a + corrector. + + Log probabilities are obtained by calling the potential function, which in turn uses + zuko probabilistic ODEs to compute the log-probability. 
+ """ + + def __init__( + self, + score_estimator: ConditionalScoreEstimator, + prior: Distribution, + max_sampling_batch_size: int = 10_000, + device: Optional[str] = None, + enable_transform: bool = False, + sample_with: str = "sde", + ): + """ + Args: + prior: Prior distribution with `.log_prob()` and `.sample()`. + score_estimator: The trained neural score estimator. + max_sampling_batch_size: Batchsize of samples being drawn from + the proposal at every iteration. + device: Training device, e.g., "cpu", "cuda" or "cuda:0". If None, + `potential_fn.device` is used. + enable_transform: Whether to transform parameters to unconstrained space + during MAP optimization. When False, an identity transform will be + returned for `theta_transform`. True is not supported yet. + sample_with: Whether to sample from the posterior using the ODE-based + sampler or the SDE-based sampler. + """ + + check_prior(prior) + potential_fn, theta_transform = score_estimator_based_potential( + score_estimator, + prior, + x_o=None, + enable_transform=enable_transform, + ) + super().__init__( + potential_fn=potential_fn, + theta_transform=theta_transform, + device=device, + ) + # Set the potential function type. + self.potential_fn: PosteriorScoreBasedPotential = potential_fn + + self.prior = prior + self.score_estimator = score_estimator + + self.sample_with = sample_with + assert self.sample_with in [ + "ode", + "sde", + ], f"sample_with must be 'ode' or 'sde', but is {self.sample_with}." + self.max_sampling_batch_size = max_sampling_batch_size + + self._purpose = """It samples from the diffusion model given the \ + score_estimator.""" + + def sample( + self, + sample_shape: Shape = torch.Size(), + x: Optional[Tensor] = None, + predictor: Union[str, Predictor] = "euler_maruyama", + corrector: Optional[Union[str, Corrector]] = None, + predictor_params: Optional[Dict] = None, + corrector_params: Optional[Dict] = None, + steps: int = 500, + ts: Optional[Tensor] = None, + max_sampling_batch_size: int = 10_000, + sample_with: Optional[str] = None, + show_progress_bars: bool = True, + ) -> Tensor: + r"""Return samples from posterior distribution $p(\theta|x)$. + + Args: + sample_shape: Shape of the samples to be drawn. + x: Deprecated - use `.set_default_x()` prior to `.sample()`. + predictor: The predictor for the diffusion-based sampler. Can be a string or + a custom predictor following the API in `sbi.samplers.score.predictors`. + Currently, only `euler_maruyama` is implemented. + corrector: The corrector for the diffusion-based sampler. Either of + [None]. + predictor_params: Additional parameters passed to predictor. + corrector_params: Additional parameters passed to corrector. + steps: Number of steps to take for the Euler-Maruyama method. + ts: Time points at which to evaluate the diffusion process. If None, a + linear grid between t_max and t_min is used. + max_sampling_batch_size: Maximum batch size for sampling. + sample_with: Deprecated - use `.build_posterior(sample_with=...)` prior to + `.sample()`. + show_progress_bars: Whether to show a progress bar during sampling. + """ + + if sample_with is not None: + raise ValueError( + f"You set `sample_with={sample_with}`. As of sbi v0.18.0, setting " + f"`sample_with` is no longer supported. 
You have to rerun " + f"`.build_posterior(sample_with={sample_with}).`" + ) + + x = self._x_else_default_x(x) + x = reshape_to_batch_event(x, self.score_estimator.condition_shape) + self.potential_fn.set_x(x) + + if self.sample_with == "ode": + samples = self.sample_via_zuko(sample_shape=sample_shape, x=x) + elif self.sample_with == "sde": + samples = self._sample_via_diffusion( + sample_shape=sample_shape, + predictor=predictor, + corrector=corrector, + predictor_params=predictor_params, + corrector_params=corrector_params, + steps=steps, + ts=ts, + max_sampling_batch_size=max_sampling_batch_size, + show_progress_bars=show_progress_bars, + ) + + return samples + + def _sample_via_diffusion( + self, + sample_shape: Shape = torch.Size(), + predictor: Union[str, Predictor] = "euler_maruyama", + corrector: Optional[Union[str, Corrector]] = None, + predictor_params: Optional[Dict] = None, + corrector_params: Optional[Dict] = None, + steps: int = 500, + ts: Optional[Tensor] = None, + max_sampling_batch_size: int = 10_000, + show_progress_bars: bool = True, + ) -> Tensor: + r"""Return samples from posterior distribution $p(\theta|x)$. + + Args: + sample_shape: Shape of the samples to be drawn. + x: Deprecated - use `.set_default_x()` prior to `.sample()`. + predictor: The predictor for the diffusion-based sampler. Can be a string or + a custom predictor following the API in `sbi.samplers.score.predictors`. + Currently, only `euler_maruyama` is implemented. + corrector: The corrector for the diffusion-based sampler. Either of + [None]. + steps: Number of steps to take for the Euler-Maruyama method. + ts: Time points at which to evaluate the diffusion process. If None, a + linear grid between t_max and t_min is used. + max_sampling_batch_size: Maximum batch size for sampling. + sample_with: Deprecated - use `.build_posterior(sample_with=...)` prior to + `.sample()`. + show_progress_bars: Whether to show a progress bar during sampling. + """ + + num_samples = torch.Size(sample_shape).numel() + + max_sampling_batch_size = ( + self.max_sampling_batch_size + if max_sampling_batch_size is None + else max_sampling_batch_size + ) + + if ts is None: + t_max = self.score_estimator.t_max + t_min = self.score_estimator.t_min + ts = torch.linspace(t_max, t_min, steps) + + diffuser = Diffuser( + self.potential_fn, + predictor=predictor, + corrector=corrector, + predictor_params=predictor_params, + corrector_params=corrector_params, + ) + max_sampling_batch_size = min(max_sampling_batch_size, num_samples) + samples = [] + num_iter = num_samples // max_sampling_batch_size + num_iter = ( + num_iter + 1 if (num_samples % max_sampling_batch_size) != 0 else num_iter + ) + for _ in range(num_iter): + samples.append( + diffuser.run( + num_samples=max_sampling_batch_size, + ts=ts, + show_progress_bars=show_progress_bars, + ) + ) + samples = torch.cat(samples, dim=0)[:num_samples] + + return samples.reshape(sample_shape + self.score_estimator.input_shape) + + def sample_via_zuko( + self, + x: Tensor, + sample_shape: Shape = torch.Size(), + ) -> Tensor: + r"""Return samples from posterior distribution with probability flow ODE. + + This build the probability flow ODE and then samples from the corresponding + flow. This is implemented via the zuko library. + + Args: + x: Condition. + sample_shape: The shape of the samples to be returned. + + Returns: + Samples. 
+        """
+        num_samples = torch.Size(sample_shape).numel()
+
+        flow = self.potential_fn.get_continuous_normalizing_flow(condition=x)
+        samples = flow.sample(torch.Size((num_samples,)))
+
+        return samples.reshape(sample_shape + self.score_estimator.input_shape)
+
+    def log_prob(
+        self,
+        theta: Tensor,
+        x: Optional[Tensor] = None,
+        track_gradients: bool = False,
+        atol: float = 1e-5,
+        rtol: float = 1e-6,
+        exact: bool = True,
+    ) -> Tensor:
+        r"""Returns the log-probability of the posterior $p(\theta|x)$.
+
+        This requires building and evaluating the probability flow ODE.
+
+        Args:
+            theta: Parameters $\theta$.
+            x: Observed data $x_o$. If None, the default $x_o$ is used.
+            track_gradients: Whether the returned tensor supports tracking gradients.
+                This can be helpful for e.g. sensitivity analysis, but increases memory
+                consumption.
+            atol: Absolute tolerance for the ODE solver.
+            rtol: Relative tolerance for the ODE solver.
+            exact: Whether to use the exact Jacobian of the transformation or a
+                stochastic approximation, which is faster but less accurate.
+
+        Returns:
+            `(len(θ),)`-shaped log posterior probability $\log p(\theta|x)$ for θ in the
+            support of the prior, -∞ (corresponding to 0 probability) outside.
+        """
+        self.potential_fn.set_x(self._x_else_default_x(x))
+
+        theta = ensure_theta_batched(torch.as_tensor(theta))
+        return self.potential_fn(
+            theta.to(self._device),
+            track_gradients=track_gradients,
+            atol=atol,
+            rtol=rtol,
+            exact=exact,
+        )
+
+    def sample_batched(
+        self,
+        sample_shape: torch.Size,
+        x: Tensor,
+        max_sampling_batch_size: int = 10000,
+        show_progress_bars: bool = True,
+    ) -> Tensor:
+        raise NotImplementedError(
+            "Batched sampling is not implemented for ScorePosterior."
+        )
+
+    def map(
+        self,
+        x: Optional[Tensor] = None,
+        num_iter: int = 1000,
+        num_to_optimize: int = 1000,
+        learning_rate: float = 1e-5,
+        init_method: Union[str, Tensor] = "posterior",
+        num_init_samples: int = 1000,
+        save_best_every: int = 1000,
+        show_progress_bars: bool = False,
+        force_update: bool = False,
+    ) -> Tensor:
+        r"""Returns the maximum-a-posteriori estimate (MAP).
+
+        The method can be interrupted (Ctrl-C) when the user sees that the
+        log-probability converges. The best estimate will be saved in `self._map` and
+        can be accessed with `self.map()`. The MAP is obtained by running gradient
+        ascent from a given number of starting positions (samples from the posterior
+        with the highest log-probability). After the optimization is done, we select the
+        parameter set that has the highest log-probability after the optimization.
+
+        Warning: The default values used by this function are not well-tested. They
+        might require hand-tuning for the problem at hand.
+
+        For developers: if the prior is a `BoxUniform`, we carry out the optimization
+        in unbounded space and transform the result back into bounded space.
+
+        Args:
+            x: Deprecated - use `.set_default_x()` prior to `.map()`.
+            num_iter: Number of optimization steps that the algorithm takes
+                to find the MAP.
+            num_to_optimize: From the drawn `num_init_samples`, use the
+                `num_to_optimize` with highest log-probability as the initial points
+                for the optimization.
+            learning_rate: Learning rate of the optimizer.
+            init_method: How to select the starting parameters for the optimization. If
+                it is a string, it can be either [`posterior`, `prior`], which samples
+                the respective distribution `num_init_samples` times. If it is a
+                tensor, the tensor will be used as init locations.
+ num_init_samples: Draw this number of samples from the posterior and + evaluate the log-probability of all of them. + save_best_every: The best log-probability is computed, saved in the + `map`-attribute, and printed every `save_best_every`-th iteration. + Computing the best log-probability creates a significant overhead + (thus, the default is `10`.) + show_progress_bars: Whether to show a progressbar during sampling from + the posterior. + force_update: Whether to re-calculate the MAP when x is unchanged and + have a cached value. + + Returns: + The MAP estimate. + """ + raise NotImplementedError( + "MAP estimation is currently not working accurately for ScorePosterior." + ) + return super().map( + x=x, + num_iter=num_iter, + num_to_optimize=num_to_optimize, + learning_rate=learning_rate, + init_method=init_method, + num_init_samples=num_init_samples, + save_best_every=save_best_every, + show_progress_bars=show_progress_bars, + force_update=force_update, + ) diff --git a/sbi/inference/potentials/base_potential.py b/sbi/inference/potentials/base_potential.py index 769031321..f7f9dfe41 100644 --- a/sbi/inference/potentials/base_potential.py +++ b/sbi/inference/potentials/base_potential.py @@ -35,6 +35,11 @@ def __init__( def __call__(self, theta: Tensor, track_gradients: bool = True) -> Tensor: raise NotImplementedError + def gradient( + self, theta: Tensor, time: Optional[Tensor] = None, track_gradients: bool = True + ) -> Tensor: + raise NotImplementedError + @property def x_is_iid(self) -> bool: """If x has batch dimension greater than 1, whether to intepret the batch as iid diff --git a/sbi/inference/potentials/likelihood_based_potential.py b/sbi/inference/potentials/likelihood_based_potential.py index c824a5dc5..11101975d 100644 --- a/sbi/inference/potentials/likelihood_based_potential.py +++ b/sbi/inference/potentials/likelihood_based_potential.py @@ -8,8 +8,8 @@ from torch.distributions import Distribution from sbi.inference.potentials.base_potential import BasePotential -from sbi.neural_nets.density_estimators import ConditionalDensityEstimator -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.estimators import ConditionalDensityEstimator +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_batch_event, reshape_to_sample_batch_event, ) diff --git a/sbi/inference/potentials/posterior_based_potential.py b/sbi/inference/potentials/posterior_based_potential.py index 9272b2d68..000d1f89f 100644 --- a/sbi/inference/potentials/posterior_based_potential.py +++ b/sbi/inference/potentials/posterior_based_potential.py @@ -9,8 +9,8 @@ from torch.distributions import Distribution from sbi.inference.potentials.base_potential import BasePotential -from sbi.neural_nets.density_estimators import ConditionalDensityEstimator -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.estimators import ConditionalDensityEstimator +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_batch_event, reshape_to_sample_batch_event, ) diff --git a/sbi/inference/potentials/score_based_potential.py b/sbi/inference/potentials/score_based_potential.py new file mode 100644 index 000000000..5dcf7b5a7 --- /dev/null +++ b/sbi/inference/potentials/score_based_potential.py @@ -0,0 +1,231 @@ +# This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed +# under the Apache License Version 2.0, see + +from typing import Optional, Tuple + +import torch +from torch import Tensor +from torch.distributions import Distribution +from zuko.distributions import NormalizingFlow +from zuko.transforms import FreeFormJacobianTransform + +from sbi.inference.potentials.base_potential import BasePotential +from sbi.neural_nets.estimators.score_estimator import ConditionalScoreEstimator +from sbi.neural_nets.estimators.shape_handling import ( + reshape_to_batch_event, + reshape_to_sample_batch_event, +) +from sbi.sbi_types import TorchTransform +from sbi.utils import mcmc_transform +from sbi.utils.sbiutils import within_support +from sbi.utils.torchutils import ensure_theta_batched + + +def score_estimator_based_potential( + score_estimator: ConditionalScoreEstimator, + prior: Optional[Distribution], + x_o: Optional[Tensor], + enable_transform: bool = False, +) -> Tuple["PosteriorScoreBasedPotential", TorchTransform]: + r"""Returns the potential function gradient for score estimators. + + Args: + score_estimator: The neural network modelling the score. + prior: The prior distribution. + x_o: The observed data at which to evaluate the score. + enable_transform: Whether to enable transforms. Not supported yet. + """ + device = str(next(score_estimator.parameters()).device) + + potential_fn = PosteriorScoreBasedPotential( + score_estimator, prior, x_o, device=device + ) + + assert ( + enable_transform is False + ), "Transforms are not yet supported for score estimators." + + if prior is not None: + theta_transform = mcmc_transform( + prior, device=device, enable_transform=enable_transform + ) + else: + theta_transform = torch.distributions.transforms.identity_transform + + return potential_fn, theta_transform + + +class PosteriorScoreBasedPotential(BasePotential): + def __init__( + self, + score_estimator: ConditionalScoreEstimator, + prior: Optional[Distribution], + x_o: Optional[Tensor], + iid_method: str = "iid_bridge", + device: str = "cpu", + ): + r"""Returns the score function for score-based methods. + + Args: + score_estimator: The neural network modelling the score. + prior: The prior distribution. + x_o: The observed data at which to evaluate the posterior. + iid_method: Which method to use for computing the score. Currently, only + `iid_bridge` as proposed in Geffner et al. is implemented. + device: The device on which to evaluate the potential. + """ + + super().__init__(prior, x_o, device=device) + self.score_estimator = score_estimator + self.score_estimator.eval() + self.iid_method = iid_method + + def __call__( + self, + theta: Tensor, + track_gradients: bool = True, + atol: float = 1e-5, + rtol: float = 1e-6, + exact: bool = True, + ) -> Tensor: + """Return the potential (posterior log prob) via probability flow ODE. + + Args: + theta: The parameters at which to evaluate the potential. + track_gradients: Whether to track gradients. + atol: Absolute tolerance for the ODE solver. + rtol: Relative tolerance for the ODE solver. + exact: Whether to use the exact ODE solver. + + Returns: + The potential function, i.e., the log probability of the posterior. 
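A minimal sketch of driving this potential directly, assuming a trained `score_estimator`, a `prior`, a single observation `x_o`, and a batch of parameters `theta`:

potential_fn, theta_transform = score_estimator_based_potential(
    score_estimator, prior, x_o=None
)
potential_fn.set_x(x_o)                # condition on the observation (batch of one)
log_prob = potential_fn(theta)         # log p(theta | x_o) via the probability flow ODE
score = potential_fn.gradient(theta)   # posterior score, evaluated at t_min by default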
+ """ + theta = ensure_theta_batched(torch.as_tensor(theta)) + theta_density_estimator = reshape_to_sample_batch_event( + theta, theta.shape[1:], leading_is_sample=True + ) + x_density_estimator = reshape_to_batch_event( + self.x_o, event_shape=self.score_estimator.condition_shape + ) + assert ( + x_density_estimator.shape[0] == 1 + ), "PosteriorScoreBasedPotential supports only x batchsize of 1`." + + self.score_estimator.eval() + + flow = self.get_continuous_normalizing_flow( + condition=x_density_estimator, atol=atol, rtol=rtol, exact=exact + ) + + with torch.set_grad_enabled(track_gradients): + log_probs = flow.log_prob(theta_density_estimator).squeeze(-1) + # Force probability to be zero outside prior support. + in_prior_support = within_support(self.prior, theta) + + masked_log_prob = torch.where( + in_prior_support, + log_probs, + torch.tensor(float("-inf"), dtype=torch.float32, device=self.device), + ) + return masked_log_prob + + def gradient( + self, theta: Tensor, time: Optional[Tensor] = None, track_gradients: bool = True + ) -> Tensor: + r"""Returns the potential function gradient for score-based methods. + + Args: + theta: The parameters at which to evaluate the potential. + time: The diffusion time. If None, then `t_min` of the + self.score_estimator is used (i.e. we evaluate the gradient of the + actual data distribution). + track_gradients: Whether to track gradients. + + Returns: + The gradient of the potential function. + """ + if time is None: + time = torch.tensor([self.score_estimator.t_min]) + + if self._x_o is None: + raise ValueError( + "No observed data x_o is available. Please reinitialize \ + the potential or manually set self._x_o." + ) + + with torch.set_grad_enabled(track_gradients): + if not self.x_is_iid or self._x_o.shape[0] == 1: + score = self.score_estimator.forward( + input=theta, condition=self.x_o, time=time + ) + else: + raise NotImplementedError( + "Score accumulation for IID data is not yet implemented." + ) + + return score + + def get_continuous_normalizing_flow( + self, + condition: Tensor, + atol: float = 1e-5, + rtol: float = 1e-6, + exact: bool = True, + ) -> NormalizingFlow: + r"""Returns the normalizing flow for the score-based estimator.""" + + # Compute the base density + mean_t = self.score_estimator.mean_t + std_t = self.score_estimator.std_t + base_density = torch.distributions.Normal(mean_t, std_t) + # TODO: is this correct? should we use append base_density for each dimension? + for _ in range(len(self.score_estimator.input_shape)): + base_density = torch.distributions.Independent(base_density, 1) + + # Build the freeform jacobian transformation by probability flow ODEs + transform = build_freeform_jacobian_transform( + self.score_estimator, condition, atol=atol, rtol=rtol, exact=exact + ) + # Use zuko to build the normalizing flow. + return NormalizingFlow(transform, base=base_density) + + +def build_freeform_jacobian_transform( + score_estimator: ConditionalScoreEstimator, + x_o: Tensor, + atol: float = 1e-5, + rtol: float = 1e-6, + exact: bool = True, +) -> FreeFormJacobianTransform: + """Builds the free-form Jacobian for the probability flow ODE, used for log-prob. + + Args: + score_estimator: The neural network estimating the score. + x_o: Observation. + atol: Absolute tolerance for the ODE solver. + rtol: Relative tolerance for the ODE solver. + exact: Whether to use the exact ODE solver. + + Returns: + Transformation of probability flow ODE. 
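For reference, the drift assembled in `f` below is the probability flow ODE of Song et al. (ICLR 2021) associated with the forward SDE $d\theta = f(\theta, t)\,dt + g(t)\,dW$:

$$\frac{d\theta}{dt} = f(\theta, t) - \tfrac{1}{2}\, g(t)^2\, \nabla_\theta \log p_t(\theta \mid x_o),$$

where the score $\nabla_\theta \log p_t(\theta \mid x_o)$ is supplied by the trained score estimator. Integrating this ODE with the free-form Jacobian transform yields the exact log-probability used by `log_prob()`.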
+ """ + # Create a freeform jacobian transformation + phi = (x_o, *score_estimator.parameters()) + + def f(t, x): + score = score_estimator(input=x, condition=x_o, time=t) + f = score_estimator.drift_fn(x, t) + g = score_estimator.diffusion_fn(x, t) + v = f - 0.5 * g**2 * score + return v + + transform = FreeFormJacobianTransform( + f=f, + t0=score_estimator.t_min, + t1=score_estimator.t_max, + phi=phi, + atol=atol, + rtol=rtol, + exact=exact, + ) + return transform diff --git a/sbi/inference/snle/snle_base.py b/sbi/inference/snle/snle_base.py index d05353134..ae036526d 100644 --- a/sbi/inference/snle/snle_base.py +++ b/sbi/inference/snle/snle_base.py @@ -17,7 +17,7 @@ from sbi.inference.posteriors.importance_posterior import ImportanceSamplingPosterior from sbi.inference.potentials import likelihood_estimator_based_potential from sbi.neural_nets import ConditionalDensityEstimator, likelihood_nn -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_batch_event, ) from sbi.utils import check_estimator_arg, check_prior, x_shape_from_simulation @@ -187,14 +187,14 @@ def train( list(self._neural_net.parameters()), lr=learning_rate, ) - self.epoch, self._val_log_prob = 0, float("-Inf") + self.epoch, self._val_loss = 0, float("Inf") while self.epoch <= max_num_epochs and not self._converged( self.epoch, stop_after_epochs ): # Train for a single epoch. self._neural_net.train() - train_log_probs_sum = 0 + train_loss_sum = 0 for batch in train_loader: self.optimizer.zero_grad() theta_batch, x_batch = ( @@ -204,7 +204,7 @@ def train( # Evaluate on x with theta as context. train_losses = self._loss(theta=theta_batch, x=x_batch) train_loss = torch.mean(train_losses) - train_log_probs_sum -= train_losses.sum().item() + train_loss_sum += train_losses.sum().item() train_loss.backward() if clip_max_norm is not None: @@ -216,14 +216,14 @@ def train( self.epoch += 1 - train_log_prob_average = train_log_probs_sum / ( + train_loss_average = train_loss_sum / ( len(train_loader) * train_loader.batch_size # type: ignore ) - self._summary["training_log_probs"].append(train_log_prob_average) + self._summary["training_loss"].append(train_loss_average) # Calculate validation performance. self._neural_net.eval() - val_log_prob_sum = 0 + val_loss_sum = 0 with torch.no_grad(): for batch in val_loader: theta_batch, x_batch = ( @@ -232,14 +232,14 @@ def train( ) # Evaluate on x with theta as context. val_losses = self._loss(theta=theta_batch, x=x_batch) - val_log_prob_sum -= val_losses.sum().item() + val_loss_sum += val_losses.sum().item() # Take mean over all validation samples. - self._val_log_prob = val_log_prob_sum / ( + self._val_loss = val_loss_sum / ( len(val_loader) * val_loader.batch_size # type: ignore ) - # Log validation log prob for every epoch. - self._summary["validation_log_probs"].append(self._val_log_prob) + # Log validation loss for every epoch. + self._summary["validation_loss"].append(self._val_loss) self._maybe_show_progress(self._show_progress_bars, self.epoch) @@ -247,7 +247,7 @@ def train( # Update summary. self._summary["epochs_trained"].append(self.epoch) - self._summary["best_validation_log_prob"].append(self._best_val_log_prob) + self._summary["best_validation_loss"].append(self._best_val_loss) # Update TensorBoard and summary dict. 
self._summarize(round_=self._round) diff --git a/sbi/inference/snpe/snpe_a.py b/sbi/inference/snpe/snpe_a.py index 64e72fd58..15add3393 100644 --- a/sbi/inference/snpe/snpe_a.py +++ b/sbi/inference/snpe/snpe_a.py @@ -14,7 +14,7 @@ from sbi.inference.posteriors.direct_posterior import DirectPosterior from sbi.inference.snpe.snpe_base import PosteriorEstimator -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator from sbi.sbi_types import TensorboardSummaryWriter, TorchModule from sbi.utils import torchutils from sbi.utils.sbiutils import ( diff --git a/sbi/inference/snpe/snpe_base.py b/sbi/inference/snpe/snpe_base.py index 55030229d..c53ee55d1 100644 --- a/sbi/inference/snpe/snpe_base.py +++ b/sbi/inference/snpe/snpe_base.py @@ -25,7 +25,7 @@ from sbi.inference.posteriors.importance_posterior import ImportanceSamplingPosterior from sbi.inference.potentials import posterior_estimator_based_potential from sbi.neural_nets import ConditionalDensityEstimator, posterior_nn -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_batch_event, reshape_to_sample_batch_event, ) @@ -336,14 +336,14 @@ def default_calibration_kernel(x): if not resume_training: self.optimizer = Adam(list(self._neural_net.parameters()), lr=learning_rate) - self.epoch, self._val_log_prob = 0, float("-Inf") + self.epoch, self._val_loss = 0, float("Inf") while self.epoch <= max_num_epochs and not self._converged( self.epoch, stop_after_epochs ): # Train for a single epoch. self._neural_net.train() - train_log_probs_sum = 0 + train_loss_sum = 0 epoch_start_time = time.time() for batch in train_loader: self.optimizer.zero_grad() @@ -363,7 +363,7 @@ def default_calibration_kernel(x): force_first_round_loss=force_first_round_loss, ) train_loss = torch.mean(train_losses) - train_log_probs_sum -= train_losses.sum().item() + train_loss_sum += train_losses.sum().item() train_loss.backward() if clip_max_norm is not None: @@ -374,14 +374,14 @@ def default_calibration_kernel(x): self.epoch += 1 - train_log_prob_average = train_log_probs_sum / ( + train_loss_average = train_loss_sum / ( len(train_loader) * train_loader.batch_size # type: ignore ) - self._summary["training_log_probs"].append(train_log_prob_average) + self._summary["training_loss"].append(train_loss_average) # Calculate validation performance. self._neural_net.eval() - val_log_prob_sum = 0 + val_loss_sum = 0 with torch.no_grad(): for batch in val_loader: @@ -399,14 +399,14 @@ def default_calibration_kernel(x): calibration_kernel, force_first_round_loss=force_first_round_loss, ) - val_log_prob_sum -= val_losses.sum().item() + val_loss_sum += val_losses.sum().item() # Take mean over all validation samples. - self._val_log_prob = val_log_prob_sum / ( + self._val_loss = val_loss_sum / ( len(val_loader) * val_loader.batch_size # type: ignore ) - # Log validation log prob for every epoch. - self._summary["validation_log_probs"].append(self._val_log_prob) + # Log validation loss for every epoch. + self._summary["validation_loss"].append(self._val_loss) self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time) self._maybe_show_progress(self._show_progress_bars, self.epoch) @@ -415,7 +415,7 @@ def default_calibration_kernel(x): # Update summary. 
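# ---------------------------------------------------------------------------
# [Editor's note] Illustrative, not part of the patch: the renamed summaries in
# the training loops above only flip the sign of what is logged. Since the
# per-sample loss of a maximum-likelihood-trained estimator is the negative
# log-probability, the old "training_log_probs"/"validation_log_probs" entries
# equal minus the new "training_loss"/"validation_loss" entries.
import torch

log_probs = torch.tensor([1.2, 0.7, 0.9])   # per-sample log-probs of a batch
losses = -log_probs                         # MLE loss = negative log-prob
old_log_prob_sum = -losses.sum()            # previous: `train_log_probs_sum -= ...`
new_loss_sum = losses.sum()                 # this patch: `train_loss_sum += ...`
assert torch.isclose(old_log_prob_sum, -new_loss_sum)
# ---------------------------------------------------------------------------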
self._summary["epochs_trained"].append(self.epoch) - self._summary["best_validation_log_prob"].append(self._best_val_log_prob) + self._summary["best_validation_loss"].append(self._best_val_loss) # Update tensorboard and summary dict. self._summarize(round_=self._round) diff --git a/sbi/inference/snpe/snpe_c.py b/sbi/inference/snpe/snpe_c.py index e72b25e33..812908a48 100644 --- a/sbi/inference/snpe/snpe_c.py +++ b/sbi/inference/snpe/snpe_c.py @@ -11,7 +11,7 @@ from sbi.inference.posteriors.direct_posterior import DirectPosterior from sbi.inference.snpe.snpe_base import PosteriorEstimator -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_batch_event, reshape_to_sample_batch_event, ) diff --git a/sbi/inference/snre/snre_base.py b/sbi/inference/snre/snre_base.py index 522e8ab56..dff310467 100644 --- a/sbi/inference/snre/snre_base.py +++ b/sbi/inference/snre/snre_base.py @@ -212,14 +212,14 @@ def train( list(self._neural_net.parameters()), lr=learning_rate, ) - self.epoch, self._val_log_prob = 0, float("-Inf") + self.epoch, self._val_loss = 0, float("Inf") while self.epoch <= max_num_epochs and not self._converged( self.epoch, stop_after_epochs ): # Train for a single epoch. self._neural_net.train() - train_log_probs_sum = 0 + train_loss_sum = 0 for batch in train_loader: self.optimizer.zero_grad() theta_batch, x_batch = ( @@ -231,7 +231,7 @@ def train( theta_batch, x_batch, num_atoms, **loss_kwargs ) train_loss = torch.mean(train_losses) - train_log_probs_sum -= train_losses.sum().item() + train_loss_sum += train_losses.sum().item() train_loss.backward() if clip_max_norm is not None: @@ -243,14 +243,14 @@ def train( self.epoch += 1 - train_log_prob_average = train_log_probs_sum / ( + train_loss_average = train_loss_sum / ( len(train_loader) * train_loader.batch_size # type: ignore ) - self._summary["training_log_probs"].append(train_log_prob_average) + self._summary["training_loss"].append(train_loss_average) # Calculate validation performance. self._neural_net.eval() - val_log_prob_sum = 0 + val_loss_sum = 0 with torch.no_grad(): for batch in val_loader: theta_batch, x_batch = ( @@ -260,13 +260,13 @@ def train( val_losses = self._loss( theta_batch, x_batch, num_atoms, **loss_kwargs ) - val_log_prob_sum -= val_losses.sum().item() + val_loss_sum += val_losses.sum().item() # Take mean over all validation samples. - self._val_log_prob = val_log_prob_sum / ( + self._val_loss = val_loss_sum / ( len(val_loader) * val_loader.batch_size # type: ignore ) # Log validation log prob for every epoch. - self._summary["validation_log_probs"].append(self._val_log_prob) + self._summary["validation_loss"].append(self._val_loss) self._maybe_show_progress(self._show_progress_bars, self.epoch) @@ -274,7 +274,7 @@ def train( # Update summary. self._summary["epochs_trained"].append(self.epoch) - self._summary["best_validation_log_prob"].append(self._best_val_log_prob) + self._summary["best_validation_loss"].append(self._best_val_loss) # Update TensorBoard and summary dict. 
self._summarize(round_=self._round) diff --git a/sbi/neural_nets/__init__.py b/sbi/neural_nets/__init__.py index e6d7a7839..1d521bf5e 100644 --- a/sbi/neural_nets/__init__.py +++ b/sbi/neural_nets/__init__.py @@ -3,12 +3,12 @@ build_mlp_classifier, build_resnet_classifier, ) -from sbi.neural_nets.density_estimators import ConditionalDensityEstimator, NFlowsFlow from sbi.neural_nets.embedding_nets import ( CNNEmbedding, FCEmbedding, PermutationInvariantEmbedding, ) +from sbi.neural_nets.estimators import ConditionalDensityEstimator, NFlowsFlow from sbi.neural_nets.factory import ( classifier_nn, flowmatching_nn, diff --git a/sbi/neural_nets/categorial.py b/sbi/neural_nets/categorial.py index 1e84b5731..0bf32c687 100644 --- a/sbi/neural_nets/categorial.py +++ b/sbi/neural_nets/categorial.py @@ -5,7 +5,7 @@ from torch import Tensor, nn, unique -from sbi.neural_nets.density_estimators import CategoricalMassEstimator, CategoricalNet +from sbi.neural_nets.estimators import CategoricalMassEstimator, CategoricalNet from sbi.utils.nn_utils import get_numel from sbi.utils.sbiutils import ( standardizing_net, diff --git a/sbi/neural_nets/density_estimators/__init__.py b/sbi/neural_nets/density_estimators/__init__.py deleted file mode 100644 index 4f96bbb53..000000000 --- a/sbi/neural_nets/density_estimators/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator -from sbi.neural_nets.density_estimators.categorical_net import ( - CategoricalMassEstimator, - CategoricalNet, -) -from sbi.neural_nets.density_estimators.flowmatching_estimator import ( - FlowMatchingEstimator, -) -from sbi.neural_nets.density_estimators.mixed_density_estimator import ( - MixedDensityEstimator, -) -from sbi.neural_nets.density_estimators.nflows_flow import NFlowsFlow -from sbi.neural_nets.density_estimators.zuko_flow import ZukoFlow diff --git a/sbi/neural_nets/embedding_nets.py b/sbi/neural_nets/embedding_nets.py index 6365d211f..63f639b7b 100644 --- a/sbi/neural_nets/embedding_nets.py +++ b/sbi/neural_nets/embedding_nets.py @@ -4,6 +4,7 @@ from typing import List, Optional, Tuple, Union import torch +from numpy import pi from torch import Tensor, nn @@ -309,3 +310,18 @@ def forward(self, x: Tensor) -> Tensor: # add number of trials as additional input return self.fc_subnet(torch.cat([combined_embedding, trial_counts], dim=1)) + + +class GaussianFourierTimeEmbedding(nn.Module): + """Gaussian random features for encoding time steps.""" + + def __init__(self, embed_dim=256, scale=30.0): + super().__init__() + # Randomly sample weights during initialization. These weights are fixed + # during optimization and are not trainable. 
+ self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False) + + def forward(self, times: Tensor): + times_proj = times[:, None] * self.W[None, :] * 2 * pi + embedding = torch.cat([torch.sin(times_proj), torch.cos(times_proj)], dim=-1) + return torch.squeeze(embedding, dim=1) diff --git a/sbi/neural_nets/estimators/__init__.py b/sbi/neural_nets/estimators/__init__.py new file mode 100644 index 000000000..1d67308f4 --- /dev/null +++ b/sbi/neural_nets/estimators/__init__.py @@ -0,0 +1,10 @@ +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.categorical_net import ( + CategoricalMassEstimator, + CategoricalNet, +) +from sbi.neural_nets.estimators.mixed_density_estimator import ( + MixedDensityEstimator, +) +from sbi.neural_nets.estimators.nflows_flow import NFlowsFlow +from sbi.neural_nets.estimators.zuko_flow import ZukoFlow diff --git a/sbi/neural_nets/density_estimators/base.py b/sbi/neural_nets/estimators/base.py similarity index 85% rename from sbi/neural_nets/density_estimators/base.py rename to sbi/neural_nets/estimators/base.py index 45d50ff7f..cc1438447 100644 --- a/sbi/neural_nets/density_estimators/base.py +++ b/sbi/neural_nets/estimators/base.py @@ -221,3 +221,44 @@ def sample_and_log_prob( samples = self.sample(sample_shape, condition, **kwargs) log_probs = self.log_prob(samples, condition, **kwargs) return samples, log_probs + + +class ConditionalVectorFieldEstimator(ConditionalEstimator): + r"""Base class for vector field (e.g., score and ODE flow) estimators. + + The density estimator class is a wrapper around neural networks that + allows to evaluate the `vector_field`, and provide the `loss` of $\theta,x$ + pairs. Here $\theta$ would be the `input` and $x$ would be the `condition`. + + Note: + We assume that the input to the density estimator is a tensor of shape + (batch_size, input_size), where input_size is the dimensionality of the input. + The condition is a tensor of shape (batch_size, *condition_shape), where + condition_shape is the shape of the condition tensor. + + """ + + def __init__( + self, net: nn.Module, input_shape: torch.Size, condition_shape: torch.Size + ) -> None: + r"""Base class for vector field estimators. + + Args: + net: Neural network. + condition_shape: Shape of the condition. If not provided, it will assume a + 1D input. + """ + super().__init__(input_shape, condition_shape) + self.net = net + + @abstractmethod + def forward(self, input: Tensor, condition: Tensor, **kwargs) -> Tensor: + """Forward pass of the score estimator. + + Args: + input: variable whose distribution is estimated. + condition: Conditioning variable. + + Raises: + NotImplementedError: This method should be implemented by sub-classes. + """ diff --git a/sbi/neural_nets/density_estimators/categorical_net.py b/sbi/neural_nets/estimators/categorical_net.py similarity index 96% rename from sbi/neural_nets/density_estimators/categorical_net.py rename to sbi/neural_nets/estimators/categorical_net.py index 63e496698..e1f3ea8ca 100644 --- a/sbi/neural_nets/density_estimators/categorical_net.py +++ b/sbi/neural_nets/estimators/categorical_net.py @@ -1,3 +1,6 @@ +# This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed +# under the Apache License Version 2.0, see + from typing import Optional import torch @@ -5,7 +8,7 @@ from torch.distributions import Categorical from torch.nn import Sigmoid, Softmax -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator class CategoricalNet(nn.Module): diff --git a/sbi/neural_nets/density_estimators/flowmatching_estimator.py b/sbi/neural_nets/estimators/flowmatching_estimator.py similarity index 98% rename from sbi/neural_nets/density_estimators/flowmatching_estimator.py rename to sbi/neural_nets/estimators/flowmatching_estimator.py index 3500b732c..8b6494054 100644 --- a/sbi/neural_nets/density_estimators/flowmatching_estimator.py +++ b/sbi/neural_nets/estimators/flowmatching_estimator.py @@ -11,7 +11,7 @@ from zuko.transforms import FreeFormJacobianTransform from zuko.utils import broadcast -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator # abstract class to ensure forward signature for flow matching networks diff --git a/sbi/neural_nets/density_estimators/mixed_density_estimator.py b/sbi/neural_nets/estimators/mixed_density_estimator.py similarity index 98% rename from sbi/neural_nets/density_estimators/mixed_density_estimator.py rename to sbi/neural_nets/estimators/mixed_density_estimator.py index 5369e3547..f251adc23 100644 --- a/sbi/neural_nets/density_estimators/mixed_density_estimator.py +++ b/sbi/neural_nets/estimators/mixed_density_estimator.py @@ -6,8 +6,8 @@ import torch from torch import Tensor, nn -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator -from sbi.neural_nets.density_estimators.categorical_net import CategoricalMassEstimator +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.categorical_net import CategoricalMassEstimator class MixedDensityEstimator(ConditionalDensityEstimator): diff --git a/sbi/neural_nets/density_estimators/nflows_flow.py b/sbi/neural_nets/estimators/nflows_flow.py similarity index 98% rename from sbi/neural_nets/density_estimators/nflows_flow.py rename to sbi/neural_nets/estimators/nflows_flow.py index 198a66776..8edd9763b 100644 --- a/sbi/neural_nets/density_estimators/nflows_flow.py +++ b/sbi/neural_nets/estimators/nflows_flow.py @@ -7,7 +7,7 @@ from pyknos.nflows.flows import Flow from torch import Tensor, nn -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator from sbi.sbi_types import Shape diff --git a/sbi/neural_nets/estimators/score_estimator.py b/sbi/neural_nets/estimators/score_estimator.py new file mode 100644 index 000000000..4b01a0267 --- /dev/null +++ b/sbi/neural_nets/estimators/score_estimator.py @@ -0,0 +1,654 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see + +import math +from typing import Callable, Optional, Union + +import torch +from torch import Tensor, nn + +from sbi.neural_nets.estimators.base import ConditionalVectorFieldEstimator + + +class ConditionalScoreEstimator(ConditionalVectorFieldEstimator): + r"""Score matching for score-based generative models (e.g., denoising diffusion). 
+ The estimator neural network (this class) learns the score function, i.e., gradient + of the conditional probability density with respect to the input, which can be used + to generate samples from the target distribution by solving the SDE starting from + the base (Gaussian) distribution. + + We assume the following SDE: + dx = A(t)xdt + B(t)dW, + where A(t) and B(t) are the drift and diffusion functions, respectively, and dW is + a Wiener process. This will lead to marginal distribution of the form: + p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)), + where mean_t(t) and std_t(t) are the conditional mean and standard deviation at a + given time t, respectively. + + Relevant literature: + - Score-based generative modeling through SDE: https://arxiv.org/abs/2011.13456 + - Denoising diffusion probabilistic models: https://arxiv.org/abs/2006.11239 + - Noise conditional score networks: https://arxiv.org/abs/1907.05600 + + NOTE: This will follow the "noise matching" approach, we could also train a + "denoising" network aiming to predict the original input given the noised input. We + can still approx. the score by Tweedie's formula, but training might be easier. + """ + + def __init__( + self, + net: nn.Module, + input_shape: torch.Size, + condition_shape: torch.Size, + weight_fn: Union[str, Callable] = "max_likelihood", + mean_0: Union[Tensor, float] = 0.0, + std_0: Union[Tensor, float] = 1.0, + t_min: float = 1e-3, + t_max: float = 1.0, + ) -> None: + r"""Score estimator class that estimates the conditional score function, i.e., + gradient of the density p(xt|x0). + + Args: + net: Score estimator neural network with call signature: input, condition, + and time (in [0,1])]. + condition_shape: Shape of the conditioning variable. + weight_fn: Function to compute the weights over time. Can be one of the + following: + - "identity": constant weights (1.), + - "max_likelihood": weights proportional to the diffusion function, or + - a custom function that returns a Callable. + + """ + super().__init__(net, input_shape, condition_shape) + + # Set lambdas (variance weights) function. + self._set_weight_fn(weight_fn) + + # Min time for diffusion (0 can be numerically unstable). + self.t_min = t_min + self.t_max = t_max + + # Starting mean and std of the target distribution (otherwise assumes 0,1). + # This will be used to precondition the score network to improve training. + if not isinstance(mean_0, Tensor): + mean_0 = torch.tensor([mean_0]) + if not isinstance(std_0, Tensor): + std_0 = torch.tensor([std_0]) + + self.register_buffer("mean_0", mean_0.clone().detach()) + self.register_buffer("std_0", std_0.clone().detach()) + + # We estimate the mean and std of the source distribution at time t_max. + mean_t = self.approx_marginal_mean(torch.tensor([t_max])) + std_t = self.approx_marginal_std(torch.tensor([t_max])) + self.register_buffer("mean_t", mean_t) + self.register_buffer("std_t", std_t) + + def forward(self, input: Tensor, condition: Tensor, time: Tensor) -> Tensor: + r"""Forward pass of the score estimator network to compute the conditional score + at a given time. + + Args: + input: Original data, x0. (input_batch_shape, *input_shape) + condition: Conditioning variable. (condition_batch_shape, *condition_shape) + times: SDE time variable in [0,1]. + + Returns: + Score (gradient of the density) at a given time, matches input shape. 
+ """ + batch_shape = torch.broadcast_shapes( + input.shape[: -len(self.input_shape)], + condition.shape[: -len(self.condition_shape)], + ) + + input = torch.broadcast_to(input, batch_shape + self.input_shape) + condition = torch.broadcast_to(condition, batch_shape + self.condition_shape) + time = torch.broadcast_to(time, batch_shape) + + # Time dependent mean and std of the target distribution to z-score the input + # and to approximate the score at the end of the diffusion. + mean = self.approx_marginal_mean(time) + std = self.approx_marginal_std(time) + + # As input to the neural net we want to have something that changes proportianl + # to how the scores change + time_enc = self.std_fn(time) + + # Time dependent z-scoring! Keeps input at similar scales + input_enc = (input - mean) / std + + # Approximate score becoming exact for t -> t_max, "skip connection" + score_gaussian = (input - mean) / std**2 + + # Score prediction by the network + score_pred = self.net(input_enc, condition, time_enc) + + # Output pre-conditioned score + # The learnable part will be largly scaled at the beginning of the diffusion + # and the gaussian part (where it should end up) will dominate at the end of + # the diffusion. + scale = self.mean_t_fn(time) / self.std_fn(time) + output_score = -scale * score_pred - score_gaussian + + return output_score + + def loss( + self, + input: Tensor, + condition: Tensor, + times: Optional[Tensor] = None, + control_variate=True, + control_variate_threshold=torch.inf, + ) -> Tensor: + r"""Defines the denoising score matching loss (e.g., from Song et al., ICLR + 2021). A random diffusion time is sampled from [0,1], and the network is trained + to predict thescore of the true conditional distribution given the noised input, + which is equivalent to predicting the (scaled) Gaussian noise added to the + input. + + Args: + input: Input variable i.e. theta. + condition: Conditioning variable. + times: SDE time variable in [t_min, t_max]. Uniformly sampled if None. + control_variate: Whether to use a control variate to reduce the variance of + the stochastic loss estimator. + control_variate_threshold: Threshold for the control variate. If the std + exceeds this threshold, the control variate is not used. + + Returns: + MSE between target score and network output, scaled by the weight function. + + """ + # Sample diffusion times. + if times is None: + times = ( + torch.rand(input.shape[0], device=input.device) + * (self.t_max - self.t_min) + + self.t_min + ) + + # Sample noise. + eps = torch.randn_like(input) + + # Compute mean and standard deviation. + mean = self.mean_fn(input, times) + std = self.std_fn(times) + + # Get noised input, i.e., p(xt|x0). + input_noised = mean + std * eps + + # Compute true cond. score: -(noised_input - mean) / (std**2). + score_target = -eps / std + + # Predict score from noised input and diffusion time. + score_pred = self.forward(input_noised, condition, times) + + # Compute weights over time. + weights = self.weight_fn(times) + + # Compute MSE loss between network output and true score. + loss = torch.sum((score_pred - score_target) ** 2.0, dim=-1) + + # For times -> 0 this loss has high variance a standard method to reduce the + # variance is to use a control variate i.e. a term that has zero expectation but + # is strongly correlated with our objective. + # Such a term can be derived by performing a 0 th order taylor expansion score + # network around the mean (https://arxiv.org/pdf/2101.03288 for details). 
+ # NOTE: As it is a taylor expansion it will only work well for small std. + + if control_variate: + D = input.shape[-1] + score_mean_pred = self.forward(mean, condition, times) + s = torch.squeeze(std, -1) + + # Loss terms that depend on eps + term1 = 2 / s * torch.sum(eps * score_mean_pred, dim=-1) + term2 = torch.sum(eps**2, dim=-1) / s**2 + # This term is the analytical expectation of the above term + term3 = D / s**2 + + control_variate = term3 - term1 - term2 + + control_variate = torch.where( + s < control_variate_threshold, control_variate, 0.0 + ) + + loss = loss + control_variate + + return weights * loss + + def approx_marginal_mean(self, times: Tensor) -> Tensor: + r"""Approximate the marginal mean of the target distribution at a given time. + + Args: + times: SDE time variable in [0,1]. + + Returns: + Approximate marginal mean at a given time. + """ + return self.mean_t_fn(times) * self.mean_0 + + def approx_marginal_std(self, times: Tensor) -> Tensor: + r"""Approximate the marginal standard deviation of the target distribution at a + given time. + + Args: + times: SDE time variable in [0,1]. + + Returns: + Approximate marginal standard deviation at a given time. + """ + vars = self.mean_t_fn(times) ** 2 * self.std_0**2 + self.std_fn(times) ** 2 + return torch.sqrt(vars) + + def mean_t_fn(self, times: Tensor) -> Tensor: + r"""Conditional mean function, E[xt|x0], specifying the "mean factor" at a given + time, which is always multiplied by x0 to get the mean of the noise distribution + , i.e., p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)). + + Args: + times: SDE time variable in [0,1]. + + Raises: + NotImplementedError: This method is implemented in each individual SDE + classes. + """ + raise NotImplementedError + + def mean_fn(self, x0: Tensor, times: Tensor) -> Tensor: + r"""Mean function of the SDE, which just multiplies the specific "mean factor" + by the original input x0, to get the mean of the noise distribution, i.e., + p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)). + + Args: + x0: Initial input data. + times: SDE time variable in [0,1]. + + Returns: + Mean of the noise distribution at a given time. + """ + return self.mean_t_fn(times) * x0 + + def std_fn(self, times: Tensor) -> Tensor: + r"""Standard deviation function of the noise distribution at a given time, + + i.e., p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)). + + Args: + times: SDE time variable in [0,1]. + + Raises: + NotImplementedError: This method is implemented in each individual SDE + classes. + """ + raise NotImplementedError + + def drift_fn(self, input: Tensor, times: Tensor) -> Tensor: + r"""Drift function, f(x,t), of the SDE described by dx = f(x,t)dt + g(x,t)dW. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Raises: + NotImplementedError: This method is implemented in each individual SDE + classes. + """ + raise NotImplementedError + + def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor: + r"""Diffusion function, g(x,t), of the SDE described by + dx = f(x,t)dt + g(x,t)dW. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Raises: + NotImplementedError: This method is implemented in each individual SDE + classes. + """ + raise NotImplementedError + + def _set_weight_fn(self, weight_fn: Union[str, Callable]): + """Set the weight function. + + Args: + weight_fn: Function to compute the weights over time. 
Can be one of the + following: + - "identity": constant weights (1.), + - "max_likelihood": weights proportional to the diffusion function, or + - a custom function that returns a Callable. + """ + if weight_fn == "identity": + self.weight_fn = lambda times: 1 + elif weight_fn == "max_likelihood": + self.weight_fn = ( + lambda times: self.diffusion_fn( + torch.ones((1,), device=times.device), times + ) + ** 2 + ) + elif weight_fn == "variance": + self.weight_fn = lambda times: self.std_fn(times) ** 2 + elif callable(weight_fn): + self.weight_fn = weight_fn + else: + raise ValueError(f"Weight function {weight_fn} not recognized.") + + +class VPScoreEstimator(ConditionalScoreEstimator): + """Class for score estimators with variance preserving SDEs (i.e., DDPM).""" + + def __init__( + self, + net: nn.Module, + input_shape: torch.Size, + condition_shape: torch.Size, + weight_fn: Union[str, Callable] = "max_likelihood", + beta_min: float = 0.01, + beta_max: float = 10.0, + mean_0: Union[Tensor, float] = 0.0, + std_0: Union[Tensor, float] = 1.0, + t_min: float = 1e-5, + t_max: float = 1.0, + ) -> None: + self.beta_min = beta_min + self.beta_max = beta_max + super().__init__( + net, + input_shape, + condition_shape, + mean_0=mean_0, + std_0=std_0, + weight_fn=weight_fn, + t_min=t_min, + t_max=t_max, + ) + + def mean_t_fn(self, times: Tensor) -> Tensor: + """Conditional mean function for variance preserving SDEs. + Args: + times: SDE time variable in [0,1]. + + Returns: + Conditional mean at a given time. + """ + phi = torch.exp( + -0.25 * times**2.0 * (self.beta_max - self.beta_min) + - 0.5 * times * self.beta_min + ) + for _ in range(len(self.input_shape)): + phi = phi.unsqueeze(-1) + return phi + + def std_fn(self, times: Tensor) -> Tensor: + """Standard deviation function for variance preserving SDEs. + Args: + times: SDE time variable in [0,1]. + + Returns: + Standard deviation at a given time. + """ + std = 1.0 - torch.exp( + -0.5 * times**2.0 * (self.beta_max - self.beta_min) - times * self.beta_min + ) + for _ in range(len(self.input_shape)): + std = std.unsqueeze(-1) + return torch.sqrt(std) + + def _beta_schedule(self, times: Tensor) -> Tensor: + """Linear beta schedule for mean scaling in variance preserving SDEs. + + Args: + times: SDE time variable in [0,1]. + + Returns: + Beta schedule at a given time. + """ + return self.beta_min + (self.beta_max - self.beta_min) * times + + def drift_fn(self, input: Tensor, times: Tensor) -> Tensor: + """Drift function for variance preserving SDEs. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Returns: + Drift function at a given time. + """ + phi = -0.5 * self._beta_schedule(times) + while len(phi.shape) < len(input.shape): + phi = phi.unsqueeze(-1) + return phi * input + + def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor: + """Diffusion function for variance preserving SDEs. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Returns: + Drift function at a given time. 
+ """ + g = torch.sqrt(self._beta_schedule(times)) + while len(g.shape) < len(input.shape): + g = g.unsqueeze(-1) + return g + + +class SubVPScoreEstimator(ConditionalScoreEstimator): + """Class for score estimators with sub-variance preserving SDEs.""" + + def __init__( + self, + net: nn.Module, + input_shape: torch.Size, + condition_shape: torch.Size, + weight_fn: Union[str, Callable] = "max_likelihood", + beta_min: float = 0.01, + beta_max: float = 10.0, + mean_0: float = 0.0, + std_0: float = 1.0, + t_min: float = 1e-2, + t_max: float = 1.0, + ) -> None: + self.beta_min = beta_min + self.beta_max = beta_max + super().__init__( + net, + input_shape, + condition_shape, + weight_fn=weight_fn, + mean_0=mean_0, + std_0=std_0, + t_min=t_min, + t_max=t_max, + ) + + def mean_t_fn(self, times: Tensor) -> Tensor: + """Conditional mean function for sub-variance preserving SDEs. + Args: + times: SDE time variable in [0,1]. + + Returns: + Conditional mean at a given time. + """ + phi = torch.exp( + -0.25 * times**2.0 * (self.beta_max - self.beta_min) + - 0.5 * times * self.beta_min + ) + for _ in range(len(self.input_shape)): + phi = phi.unsqueeze(-1) + return phi + + def std_fn(self, times: Tensor) -> Tensor: + """Standard deviation function for variance preserving SDEs. + Args: + times: SDE time variable in [0,1]. + + Returns: + Standard deviation at a given time. + """ + std = 1.0 - torch.exp( + -0.5 * times**2.0 * (self.beta_max - self.beta_min) - times * self.beta_min + ) + for _ in range(len(self.input_shape)): + std = std.unsqueeze(-1) + return std + + def _beta_schedule(self, times: Tensor) -> Tensor: + """Linear beta schedule for mean scaling in sub-variance preserving SDEs. + (Same as for variance preserving SDEs.) + + Args: + times: SDE time variable in [0,1]. + + Returns: + Beta schedule at a given time. + """ + return self.beta_min + (self.beta_max - self.beta_min) * times + + def drift_fn(self, input: Tensor, times: Tensor) -> Tensor: + """Drift function for sub-variance preserving SDEs. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Returns: + Drift function at a given time. + """ + phi = -0.5 * self._beta_schedule(times) + + while len(phi.shape) < len(input.shape): + phi = phi.unsqueeze(-1) + + return phi * input + + def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor: + """Diffusion function for sub-variance preserving SDEs. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Returns: + Diffusion function at a given time. + """ + g = torch.sqrt( + torch.abs( + self._beta_schedule(times) + * ( + 1 + - torch.exp( + -2 * self.beta_min * times + - (self.beta_max - self.beta_min) * times**2 + ) + ) + ) + ) + + while len(g.shape) < len(input.shape): + g = g.unsqueeze(-1) + + return g + + +class VEScoreEstimator(ConditionalScoreEstimator): + """Class for score estimators with variance exploding SDEs (i.e., NCSN / SMLD).""" + + def __init__( + self, + net: nn.Module, + input_shape: torch.Size, + condition_shape: torch.Size, + weight_fn: Union[str, Callable] = "max_likelihood", + sigma_min: float = 1e-5, + sigma_max: float = 5.0, + mean_0: float = 0.0, + std_0: float = 1.0, + ) -> None: + self.sigma_min = sigma_min + self.sigma_max = sigma_max + super().__init__( + net, + input_shape, + condition_shape, + weight_fn=weight_fn, + mean_0=mean_0, + std_0=std_0, + ) + + def mean_t_fn(self, times: Tensor) -> Tensor: + """Conditional mean function for variance exploding SDEs, which is always 1. 
+ + Args: + times: SDE time variable in [0,1]. + + Returns: + Conditional mean at a given time. + """ + phi = torch.ones_like(times, device=times.device) + for _ in range(len(self.input_shape)): + phi = phi.unsqueeze(-1) + return phi + + def std_fn(self, times: Tensor) -> Tensor: + """Standard deviation function for variance exploding SDEs. + + Args: + times: SDE time variable in [0,1]. + + Returns: + Standard deviation at a given time. + """ + std = self.sigma_min * (self.sigma_max / self.sigma_min) ** times + for _ in range(len(self.input_shape)): + std = std.unsqueeze(-1) + return std + + def _sigma_schedule(self, times: Tensor) -> Tensor: + """Geometric sigma schedule for variance exploding SDEs. + + Args: + times: SDE time variable in [0,1]. + + Returns: + Sigma schedule at a given time. + """ + return self.sigma_min * (self.sigma_max / self.sigma_min) ** times + + def drift_fn(self, input: Tensor, times: Tensor) -> Tensor: + """Drift function for variance exploding SDEs. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Returns: + Drift function at a given time. + """ + return torch.tensor([0.0]) + + def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor: + """Diffusion function for variance exploding SDEs. + + Args: + input: Original data, x0. + times: SDE time variable in [0,1]. + + Returns: + Diffusion function at a given time. + """ + g = self._sigma_schedule(times) * math.sqrt( + (2 * math.log(self.sigma_max / self.sigma_min)) + ) + + while len(g.shape) < len(input.shape): + g = g.unsqueeze(-1) + + return g diff --git a/sbi/neural_nets/density_estimators/shape_handling.py b/sbi/neural_nets/estimators/shape_handling.py similarity index 100% rename from sbi/neural_nets/density_estimators/shape_handling.py rename to sbi/neural_nets/estimators/shape_handling.py diff --git a/sbi/neural_nets/density_estimators/zuko_flow.py b/sbi/neural_nets/estimators/zuko_flow.py similarity index 98% rename from sbi/neural_nets/density_estimators/zuko_flow.py rename to sbi/neural_nets/estimators/zuko_flow.py index b8c9c8726..edc535d69 100644 --- a/sbi/neural_nets/density_estimators/zuko_flow.py +++ b/sbi/neural_nets/estimators/zuko_flow.py @@ -7,7 +7,7 @@ from torch import Tensor, nn from zuko.flows.core import Flow -from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator +from sbi.neural_nets.estimators.base import ConditionalDensityEstimator from sbi.sbi_types import Shape diff --git a/sbi/neural_nets/factory.py b/sbi/neural_nets/factory.py index 9a7af4c6e..1db2a096c 100644 --- a/sbi/neural_nets/factory.py +++ b/sbi/neural_nets/factory.py @@ -2,7 +2,7 @@ # under the Affero General Public License v3, see . -from typing import Any, Callable, Optional +from typing import Any, Callable, Optional, Union from torch import nn @@ -32,6 +32,7 @@ ) from sbi.neural_nets.mdn import build_mdn from sbi.neural_nets.mnle import build_mnle +from sbi.neural_nets.score_nets import build_score_estimator from sbi.utils.nn_utils import check_net_device model_builders = { @@ -222,8 +223,8 @@ def flowmatching_nn( be used for Flow Matching. The returned function is to be passed to the Args: - model: The type of density estimator that will be created. One of [`mdn`, - `made`, `maf`, `maf_rqs`, `nsf`]. + model: the type of regression network to learn the vector field. One of ['mlp', + 'resnet']. z_score_theta: Whether to z-score parameters $\theta$ before passing them into the network, can take one of the following: - `none`, or None: do not z-score. 
@@ -238,9 +239,8 @@ def flowmatching_nn( density estimator is a normalizing flow (i.e. currently either a `maf` or a `nsf`). Ignored if density estimator is a `mdn` or `made`. num_blocks: Number of blocks if a ResNet is used. - embedding_net: Optional embedding network for x. - num_components: Number of mixture components for a mixture of Gaussians. - Ignored if density estimator is not an mdn. + num_frequencies: Number of frequencies for the time embedding. + embedding_net: Optional embedding network for the condition. kwargs: additional custom arguments passed to downstream build functions. """ implemented_models = ["mlp", "resnet"] @@ -370,3 +370,85 @@ def build_fn(batch_theta, batch_x): kwargs.pop("num_components") return build_fn_snpe_a if model == "mdn_snpe_a" else build_fn + + +def posterior_score_nn( + sde_type: str, + score_net_type: Union[str, nn.Module] = "mlp", + z_score_theta: Optional[str] = "independent", + z_score_x: Optional[str] = "independent", + t_embedding_dim: int = 16, + hidden_features: int = 50, + embedding_net: nn.Module = nn.Identity(), + **kwargs: Any, +) -> Callable: + """Build util function that builds a ScoreEstimator object for score-based + posteriors. + + Args: + sde_type: SDE type used, which defines the mean and std functions. One of: + - 'vp': Variance preserving. + - 'subvp': Sub-variance preserving. + - 've': Variance exploding. + Defaults to 'vp'. + score_net: Type of regression network. One of: + - 'mlp': Fully connected feed-forward network. + - 'resnet': Residual network (NOT IMPLEMENTED). + - nn.Module: Custom network + Defaults to 'mlp'. + z_score_theta: Whether to z-score thetas passing into the network, can be one + of: + - `none`, or None: do not z-score. + - `independent`: z-score each dimension independently. + - `structured`: treat dimensions as related, therefore compute mean and std + over the entire batch, instead of per-dimension. Should be used when each + sample is, for example, a time series or an image. + z_score_x: Whether to z-score xs passing into the network, same options as + z_score_theta. + t_embedding_dim: Embedding dimension of diffusion time. Defaults to 16. + hidden_features: Number of hidden units per layer. Defaults to 50. + embedding_net: Embedding network for x (conditioning variable). Defaults to + nn.Identity(). + + Returns: + Constructor function for NPSE. + """ + + kwargs = dict( + zip( + ( + "z_score_x", + "z_score_y", + "sde_type", + "score_net", + "t_embedding_dim", + "hidden_features", + "embedding_net_y", + ), + ( + z_score_theta, + z_score_x, + sde_type, + score_net_type, + t_embedding_dim, + hidden_features, + embedding_net, + ), + ), + **kwargs, + ) + + def build_fn(batch_theta, batch_x): + """Build function wrapper for the build_score_estimator function that + is required for the score posterior class. + + Args: + batch_theta: a batch of theta. + batch_x: a batch of x. + + Returns: + Callable: a ScoreEstimator object. + """ + return build_score_estimator(batch_x=batch_theta, batch_y=batch_x, **kwargs) + + return build_fn diff --git a/sbi/neural_nets/flow.py b/sbi/neural_nets/flow.py index c8f505e98..d820cfac7 100644 --- a/sbi/neural_nets/flow.py +++ b/sbi/neural_nets/flow.py @@ -1,7 +1,6 @@ # This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed # under the Apache License Version 2.0, see - from functools import partial from typing import List, Optional, Sequence, Union @@ -15,7 +14,7 @@ ) from torch import Tensor, nn, relu, tanh, tensor, uint8 -from sbi.neural_nets.density_estimators import NFlowsFlow, ZukoFlow +from sbi.neural_nets.estimators import NFlowsFlow, ZukoFlow from sbi.utils.nn_utils import get_numel from sbi.utils.sbiutils import ( standardizing_net, diff --git a/sbi/neural_nets/flow_matcher.py b/sbi/neural_nets/flow_matcher.py index 82e8abcae..16e891d9f 100644 --- a/sbi/neural_nets/flow_matcher.py +++ b/sbi/neural_nets/flow_matcher.py @@ -14,7 +14,7 @@ from torch.nn import functional as F from zuko.nn import MLP as ZukoMLP -from sbi.neural_nets.density_estimators.flowmatching_estimator import ( +from sbi.neural_nets.estimators.flowmatching_estimator import ( FlowMatchingEstimator, VectorFieldNet, ) diff --git a/sbi/neural_nets/mdn.py b/sbi/neural_nets/mdn.py index 14e3a3955..a80254312 100644 --- a/sbi/neural_nets/mdn.py +++ b/sbi/neural_nets/mdn.py @@ -7,7 +7,7 @@ from pyknos.nflows import flows, transforms from torch import Tensor, nn -from sbi.neural_nets.density_estimators import NFlowsFlow +from sbi.neural_nets.estimators import NFlowsFlow from sbi.utils.nn_utils import get_numel from sbi.utils.sbiutils import ( standardizing_net, diff --git a/sbi/neural_nets/mnle.py b/sbi/neural_nets/mnle.py index cf661c89e..73bb5ea03 100644 --- a/sbi/neural_nets/mnle.py +++ b/sbi/neural_nets/mnle.py @@ -8,8 +8,8 @@ from torch import Tensor, nn from sbi.neural_nets.categorial import build_categoricalmassestimator -from sbi.neural_nets.density_estimators import MixedDensityEstimator -from sbi.neural_nets.density_estimators.mixed_density_estimator import _separate_input +from sbi.neural_nets.estimators import MixedDensityEstimator +from sbi.neural_nets.estimators.mixed_density_estimator import _separate_input from sbi.neural_nets.flow import ( build_made, build_maf, diff --git a/sbi/neural_nets/score_nets.py b/sbi/neural_nets/score_nets.py new file mode 100644 index 000000000..6fa704722 --- /dev/null +++ b/sbi/neural_nets/score_nets.py @@ -0,0 +1,376 @@ +from typing import Optional, Union + +import torch +import torch.nn as nn +from torch import Tensor + +from sbi.neural_nets.embedding_nets import GaussianFourierTimeEmbedding +from sbi.neural_nets.estimators.score_estimator import ( + ConditionalScoreEstimator, + SubVPScoreEstimator, + VEScoreEstimator, + VPScoreEstimator, +) +from sbi.utils.sbiutils import standardizing_net, z_score_parser, z_standardization +from sbi.utils.user_input_checks import check_data_device + + +class EmbedInputs(nn.Module): + """Constructs input handler that optionally standardizes and/or + embeds the input and conditioning variables, as well as the diffusion time + embedding. + """ + + def __init__(self, embedding_net_x, embedding_net_y, embedding_net_t): + """Initializes the input handler. + + Args: + embedding_net_x: Embedding network for x. + embedding_net_y: Embedding network for y. + embedding_net_t: Embedding network for time. + """ + super().__init__() + self.embedding_net_x = embedding_net_x + self.embedding_net_y = embedding_net_y + self.embedding_net_t = embedding_net_t + + def forward(self, x: Tensor, y: Tensor, t: Tensor) -> tuple: + """Forward pass of the input layer. + + Args: + inputs: theta (x), x (y), and diffusion time (t). + + Returns: + Potentially standardized and/or embedded output. 
+ """ + + return ( + self.embedding_net_x(x), + self.embedding_net_y(y), + self.embedding_net_t(t), + ) + + +def build_input_handler( + batch_y: Tensor, + t_embedding_dim: int, + z_score_y: Optional[str] = "independent", + embedding_net_x: nn.Module = nn.Identity(), + embedding_net_y: nn.Module = nn.Identity(), +) -> nn.Module: + """Builds input layer for vector field regression, including time embedding, and + optionally z-scores. + + Args: + batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring. + batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring. + t_embedding_dim: Dimensionality of the time embedding. + z_score_x: Whether to z-score xs passing into the network, can be one of: + - `none`, or None: do not z-score. + - `independent`: z-score each dimension independently. + - `structured`: treat dimensions as related, therefore compute mean and std + over the entire batch, instead of per-dimension. Should be used when each + sample is, for example, a time series or an image. + z_score_y: Whether to z-score ys passing into the network, same options as + z_score_x. + embedding_net_x: Optional embedding network for x. + embedding_net_y: Optional embedding network for y. + + Returns: + Input handler that provides x, y, and time embedding, and optionally z-scores. + """ + + z_score_y_bool, structured_y = z_score_parser(z_score_y) + if z_score_y_bool: + embedding_net_y = nn.Sequential( + standardizing_net(batch_y, structured_y), embedding_net_y + ) + embedding_net_t = GaussianFourierTimeEmbedding(t_embedding_dim) + input_handler = EmbedInputs( + embedding_net_x, + embedding_net_y, + embedding_net_t, + ) + return input_handler + + +def build_score_estimator( + batch_x: Tensor, + batch_y: Tensor, + sde_type: Optional[str] = "vp", + score_net: Optional[Union[str, nn.Module]] = "mlp", + z_score_x: Optional[str] = "independent", + z_score_y: Optional[str] = "independent", + t_embedding_dim: int = 16, + num_layers: int = 3, + hidden_features: int = 50, + embedding_net_x: nn.Module = nn.Identity(), + embedding_net_y: nn.Module = nn.Identity(), + **kwargs, +) -> ConditionalScoreEstimator: + """Builds score estimator for score-based generative models. + + Args: + batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring. + batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring. + sde_type: SDE type used, which defines the mean and std functions. One of: + - 'vp': Variance preserving. + - 'subvp': Sub-variance preserving. + - 've': Variance exploding. + Defaults to 'vp'. + score_net: Type of regression network. One of: + - 'mlp': Fully connected feed-forward network. + - 'resnet': Residual network (NOT IMPLEMENTED). + - nn.Module: Custom network + Defaults to 'mlp'. + z_score_x: Whether to z-score xs passing into the network, can be one of: + - `none`, or None: do not z-score. + - `independent`: z-score each dimension independently. + - `structured`: treat dimensions as related, therefore compute mean and std + over the entire batch, instead of per-dimension. Should be used when each + sample is, for example, a time series or an image. + z_score_y: Whether to z-score ys passing into the network, same options as + z_score_x. + t_embedding_dim: Embedding dimension of diffusion time. Defaults to 16. + num_layers: Number of MLP hidden layers. Defaults to 3. + hidden_features: Number of hidden units per layer. Defaults to 50. + embedding_net_x: Embedding network for x. Defaults to nn.Identity(). 
+ embedding_net_y: Embedding network for y. Defaults to nn.Identity(). + kwargs: Additional arguments that are passed by the build function for score + network hyperparameters. + + + Returns: + ScoreEstimator object with a specific SDE implementation. + """ + + """Builds score estimator for score-based generative models.""" + check_data_device(batch_x, batch_y) + + mean_0, std_0 = z_standardization(batch_x, z_score_x == "structured") + + # Default to variance-preserving SDE + if sde_type is None: + sde_type = "vp" + + input_handler = build_input_handler( + batch_y, + t_embedding_dim, + z_score_y, + embedding_net_x, + embedding_net_y, + ) + + # Infer the output dimensionalities of the embedding_net by making a forward pass. + x_numel = embedding_net_x(batch_x).shape[1:].numel() + y_numel = embedding_net_y(batch_y).shape[1:].numel() + + if score_net == "mlp": + score_net = MLP( + x_numel + y_numel + t_embedding_dim, + x_numel, + input_handler, + hidden_dim=hidden_features, + num_layers=num_layers, + ) + elif score_net == "ada_mlp": + score_net = AdaMLP( + x_numel, + t_embedding_dim + y_numel, + input_handler, + hidden_dim=hidden_features, + num_layers=num_layers, + ) + elif score_net == "resnet": + raise NotImplementedError + elif isinstance(score_net, nn.Module): + pass + else: + raise ValueError(f"Invalid score network: {score_net}") + + if sde_type == "vp": + estimator = VPScoreEstimator + elif sde_type == "ve": + estimator = VEScoreEstimator + elif sde_type == "subvp": + estimator = SubVPScoreEstimator + else: + raise ValueError(f"SDE type: {sde_type} not supported.") + + input_shape = batch_x.shape[1:] + condition_shape = batch_y.shape[1:] + return estimator( + score_net, input_shape, condition_shape, mean_0=mean_0, std_0=std_0, **kwargs + ) + + +class MLP(nn.Module): + """Simple fully connected neural network.""" + + def __init__( + self, + input_dim: int, + output_dim: int, + input_handler: nn.Module, + hidden_dim: int = 100, + num_layers: int = 5, + activation: nn.Module = nn.GELU(), + layer_norm: bool = True, + skip_connection: bool = True, + ): + """Initializes the MLP. + + Args: + input_dim: The dimensionality of the input tensor. + output_dim: The dimensionality of the output tensor. + input_handler: The input handler module. + hidden_dim: The dimensionality of the hidden layers. + num_layers: The number of hidden layers. + activation: The activation function. + layer_norm: Whether to use layer normalization. + skip_connection: Whether to use skip connections. 
+ """ + super().__init__() + + self.input_handler = input_handler + self.num_layers = num_layers + self.activation = activation + self.skip_connection = skip_connection + + # Initialize layers + self.layers = nn.ModuleList() + + # Input layer + self.layers.append(nn.Linear(input_dim, hidden_dim)) + + # Hidden layers + for _ in range(num_layers - 1): + if layer_norm: + block = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.LayerNorm(hidden_dim), + activation, + ) + else: + block = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), activation) + self.layers.append(block) + + # Output layer + self.layers.append(nn.Linear(hidden_dim, output_dim)) + + def forward(self, x: Tensor, y: Tensor, t: Tensor) -> Tensor: + x, y, t = self.input_handler(x, y, t) + xyt = torch.cat([x, y, t], dim=-1) + + h = self.activation(self.layers[0](xyt)) + + # Forward pass through hidden layers + for i in range(1, self.num_layers - 1): + h_new = self.layers[i](h) + h = (h + h_new) if self.skip_connection else h_new + + # Output layer + output = self.layers[-1](h) + + return output + + +class AdaMLPBlock(nn.Module): + r"""Creates a residual MLP block module with adaptive layer norm for conditioning. + + Arguments: + hidden_dim: The dimensionality of the MLP block. + cond_dim: The number of embedding features. + """ + + def __init__( + self, + hidden_dim: int, + cond_dim: int, + mlp_ratio: int = 1, + ): + super().__init__() + + self.ada_ln = nn.Sequential( + nn.Linear(cond_dim, hidden_dim), + nn.SiLU(), + nn.Linear(hidden_dim, 3 * hidden_dim), + ) + + # Initialize the last layer to zero + self.ada_ln[-1].weight.data.zero_() + self.ada_ln[-1].bias.data.zero_() + + # MLP block + # NOTE: This can be made more flexible to support layer types. + self.block = nn.Sequential( + nn.LayerNorm(hidden_dim, elementwise_affine=False), + nn.Linear(hidden_dim, hidden_dim * mlp_ratio), + nn.GELU(), + nn.Linear(hidden_dim * mlp_ratio, hidden_dim), + ) + + def forward(self, x: Tensor, yt: Tensor) -> Tensor: + """ + Arguments: + x: The input tensor, with shape (B, D_x). + t: The embedding vector, with shape (B, D_t). + + Returns: + The output tensor, with shape (B, D_x). + """ + + a, b, c = self.ada_ln(yt).chunk(3, dim=-1) + + y = (a + 1) * x + b + y = self.block(y) + y = x + c * y + y = y / torch.sqrt(1 + c * c) + + return y + + +class AdaMLP(nn.Module): + """ + MLP denoising network using adaptive layer normalization for conditioning. + Relevant literature: https://arxiv.org/abs/2212.09748 + + See "Scalable Diffusion Models with Transformers", by William Peebles, Saining Xie. + + Arguments: + x_dim: The dimensionality of the input tensor. + emb_dim: The number of embedding features. + input_handler: The input handler module. + hidden_dim: The dimensionality of the MLP block. + num_layers: The number of MLP blocks. + **kwargs: Key word arguments handed to the AdaMLPBlock. 
+ """ + + def __init__( + self, + x_dim: int, + emb_dim: int, + input_handler: nn.Module, + hidden_dim: int = 100, + num_layers: int = 3, + **kwargs, + ): + super().__init__() + self.input_handler = input_handler + self.num_layers = num_layers + + self.ada_blocks = nn.ModuleList() + for _i in range(num_layers): + self.ada_blocks.append(AdaMLPBlock(hidden_dim, emb_dim, **kwargs)) + + self.input_layer = nn.Linear(x_dim, hidden_dim) + self.output_layer = nn.Linear(hidden_dim, x_dim) + + def forward(self, x: Tensor, y: Tensor, t: Tensor) -> Tensor: + x, y, t = self.input_handler(x, y, t) + yt = torch.cat([y, t], dim=-1) + + h = self.input_layer(x) + for i in range(self.num_layers): + h = self.ada_blocks[i](h, yt) + return self.output_layer(h) diff --git a/sbi/samplers/score/correctors.py b/sbi/samplers/score/correctors.py new file mode 100644 index 000000000..e64b370d7 --- /dev/null +++ b/sbi/samplers/score/correctors.py @@ -0,0 +1,65 @@ +from abc import ABC, abstractmethod +from typing import Callable, Optional, Type + +from torch import Tensor + +from sbi.samplers.score.predictors import Predictor + +CORRECTORS = {} + + +def get_corrector(name: str, predictor: Predictor, **kwargs) -> "Corrector": + """Helper function to get corrector by name. + + Args: + name: Name of the corrector. + predictor: Predictor to initialize the corrector. + + Returns: + Corrector: The corrector. + """ + return CORRECTORS[name](predictor, **kwargs) + + +def register_corrector(name: str) -> Callable: + """Register a corrector. + + Args: + name (str): Name of the corrector. + + Returns: + Callable: Decorator for registering the corrector. + """ + + def decorator(corrector: Type[Corrector]) -> Callable: + assert issubclass( + corrector, Corrector + ), "Corrector must be a subclass of Corrector." + CORRECTORS[name] = corrector + return corrector + + return decorator + + +class Corrector(ABC): + def __init__( + self, + predictor: Predictor, + ): + """Base class for correctors. + + Args: + predictor (Predictor): The associated predictor. + """ + self.predictor = predictor + self.potential_fn = predictor.potential_fn + self.device = predictor.device + + def __call__( + self, theta: Tensor, t0: Tensor, t1: Optional[Tensor] = None + ) -> Tensor: + return self.correct(theta, t0, t1) + + @abstractmethod + def correct(self, theta: Tensor, t0: Tensor, t1: Optional[Tensor] = None) -> Tensor: + pass diff --git a/sbi/samplers/score/predictors.py b/sbi/samplers/score/predictors.py new file mode 100644 index 000000000..3f0a2eba6 --- /dev/null +++ b/sbi/samplers/score/predictors.py @@ -0,0 +1,122 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see + +from abc import ABC, abstractmethod +from typing import Callable, Type + +import torch +from torch import Tensor + +from sbi.inference.potentials.score_based_potential import ( + PosteriorScoreBasedPotential, +) + +PREDICTORS = {} + + +def get_predictor( + name: str, score_based_potential: PosteriorScoreBasedPotential, **kwargs +) -> "Predictor": + """Helper function to get predictor by name. + + Args: + name: Name of the predictor. + score_based_potential: Score-based potential to initialize the predictor. + """ + return PREDICTORS[name](score_based_potential, **kwargs) + + +def register_predictor(name: str) -> Callable: + """Register a predictor. + + Args: + name (str): Name of the predictor. + + Returns: + Callable: Decorator for registering the predictor. 
+ """ + + def decorator(predictor: Type[Predictor]) -> Callable: + assert issubclass( + predictor, Predictor + ), "Predictor must be a subclass of Predictor." + PREDICTORS[name] = predictor + return predictor + + return decorator + + +class Predictor(ABC): + """Predictor base class. + + See child classes for more detail. + """ + + def __init__( + self, + potential_fn: PosteriorScoreBasedPotential, + ): + """Initialize predictor. + + Args: + potential_fn: potential with gradient from which to sample. + """ + self.potential_fn = potential_fn + self.device = potential_fn.device + + # Extract relevant functions from the score function + self.drift = self.potential_fn.score_estimator.drift_fn + self.diffusion = self.potential_fn.score_estimator.diffusion_fn + + def __call__(self, theta: Tensor, t1: Tensor, t0: Tensor) -> Tensor: + """Run prediction. + + Args: + theta: Parameters. + t1: Time. + t0: Time. + """ + return self.predict(theta, t1, t0) + + @abstractmethod + def predict(self, theta: Tensor, t1: Tensor, t0: Tensor) -> Tensor: + """Run prediction. + + Args: + theta: Parameters. + t1: Time. + t0: Time. + """ + pass + + +@register_predictor("euler_maruyama") +class EulerMaruyama(Predictor): + def __init__( + self, + potential_fn: PosteriorScoreBasedPotential, + eta: float = 1.0, + ): + """Simple Euler-Maruyama discretization of the associated family of reverse + SDEs. + + Args: + potential_fn: Score-based potential to predict. + eta: Mediates how much noise is added during sampling i.e. + for values approaching 0 this becomes the deterministic probabilifty + flow ODE. For large values it becomes a more stochastic reverse SDE. + Defaults to 1.0. + """ + super().__init__(potential_fn) + assert eta > 0, "eta must be positive." + self.eta = eta + + def predict(self, theta: Tensor, t1: Tensor, t0: Tensor): + dt = t1 - t0 + dt_sqrt = torch.sqrt(dt) + f = self.drift(theta, t1) + g = self.diffusion(theta, t1) + score = self.potential_fn.gradient(theta, t1) + f_backward = f - (1 + self.eta**2) / 2 * g**2 * score + g_backward = self.eta * g + return theta - f_backward * dt + g_backward * torch.randn_like(theta) * dt_sqrt diff --git a/sbi/samplers/score/score.py b/sbi/samplers/score/score.py new file mode 100644 index 000000000..57aa6d5b1 --- /dev/null +++ b/sbi/samplers/score/score.py @@ -0,0 +1,160 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see + +from typing import Optional, Union + +import torch +from torch import Tensor +from tqdm.auto import tqdm + +from sbi.inference.potentials.score_based_potential import ( + PosteriorScoreBasedPotential, +) +from sbi.samplers.score.correctors import Corrector, get_corrector +from sbi.samplers.score.predictors import Predictor, get_predictor + + +class Diffuser: + predictor: Predictor + corrector: Optional[Corrector] + + def __init__( + self, + score_based_potential: PosteriorScoreBasedPotential, + predictor: Union[str, Predictor], + corrector: Optional[Union[str, Corrector]] = None, + predictor_params: Optional[dict] = None, + corrector_params: Optional[dict] = None, + ): + """Diffusion-based sampler for score-based sampling i.e it requires the + gradient of a family of distributions (for different times) characterized by the + gradient of a potential function (i.e. the score function). The sampler uses a + predictor to propagate samples forward in time. Optionally, a corrector can be + used to refine the samples at the current time. 
+ + Args: + score_based_potential_gradient: A time-dependent score-based potential. + predictor: A predictor to propagate samples forward in time. + corrector (Ooptional): A corrector to refine the samples. Defaults to None. + predictor_params (optional): Parameters passed to the predictor, if given as + string. Defaults to None. + corrector_params (optional): Parameters passed to the corrector, if given as + string. Defaults to None. + """ + # Set predictor and corrector + self.set_predictor(predictor, score_based_potential, **(predictor_params or {})) + self.set_corrector(corrector, **(corrector_params or {})) + self.device = self.predictor.device + + # Extract time limits from the score function + self.t_min = score_based_potential.score_estimator.t_min + self.t_max = score_based_potential.score_estimator.t_max + + # Extract initial moments + self.init_mean = score_based_potential.score_estimator.mean_t + self.init_std = score_based_potential.score_estimator.std_t + + # Extract relevant shapes from the score function + self.input_shape = score_based_potential.score_estimator.input_shape + self.condition_shape = score_based_potential.score_estimator.condition_shape + condition_dim = len(self.condition_shape) + # TODO: this is the iid setting and we don't want to generate num_obs samples, + # but only one sample given the condition. + self.batch_shape = score_based_potential.x_o.shape[:-condition_dim] + + def set_predictor( + self, + predictor: Union[str, Predictor], + score_based_potential: PosteriorScoreBasedPotential, + **kwargs, + ): + """Set the predictor for the diffusion-based sampler.""" + if isinstance(predictor, str): + self.predictor = get_predictor(predictor, score_based_potential, **kwargs) + else: + self.predictor = predictor + + def set_corrector(self, corrector: Optional[Union[str, Corrector]], **kwargs): + """Set the corrector for the diffusion-based sampler.""" + if corrector is None: + self.corrector = None + elif isinstance(corrector, Corrector): + self.corrector = corrector + else: + self.corrector = get_corrector(corrector, self.predictor, **kwargs) + + def initialize(self, num_samples: int) -> Tensor: + """Initialize the sampler by drawing samples from the initial distribution. + + If we have to sample from a batch of distributions, we draw samples from each + distribution in the batch i.e. of shape (num_batch, num_samples, input_shape). + + Args: + num_samples (int): Number of samples to draw. + + Returns: + Tensor: _description_ + """ + # TODO: for iid setting, self.batch_shape.numel() will be the iid-batch. But we + # don't want to generate num_obs samples, but only one sample given the the iid + # batch. + # TODO: the solution will probably be to distinguish between the iid setting and + # batched sampling setting with a flag. + # TODO: this fixes the iid setting shape problems, but iid inference via + # iid_bridge is not accurate. + # num_batch = self.batch_shape.numel() + # init_shape = (num_batch, num_samples) + self.input_shape + init_shape = ( + num_samples, + ) + self.input_shape # just use num_samples, not num_batch + # NOTE: for the IID setting we might need to scale the noise with iid batch + # size, as in equation (7) in the paper. 
+ eps = torch.randn(init_shape, device=self.device) + mean, std, eps = torch.broadcast_tensors(self.init_mean, self.init_std, eps) + return mean + std * eps + + @torch.no_grad() + def run( + self, + num_samples: int, + ts: Tensor, + show_progress_bars: bool = True, + save_intermediate: bool = False, + ) -> Tensor: + """Samples from the distribution at the final time point by propagating samples + forward in time using the predictor and optionally refining them using the a + corrector. + + Args: + num_samples: Number of samples to draw. + ts: Time grid to propagate samples forward, or "solve" the SDE. + show_progress_bars (optional): Shows a progressbar or not. Defaults to True. + save_intermediate (optional): Returns samples at all time point, instead of + only returning samples at the end. Defaults to False. + + Returns: + Tensor: Samples from the distribution(s). + """ + samples = self.initialize(num_samples) + pbar = tqdm( + range(1, ts.numel()), + disable=not show_progress_bars, + desc=f"Drawing {num_samples} posterior samples", + ) + + if save_intermediate: + intermediate_samples = [samples] + + for i in pbar: + t1 = ts[i - 1] + t0 = ts[i] + samples = self.predictor(samples, t1, t0) + if self.corrector is not None: + samples = self.corrector(samples, t0, t1) + if save_intermediate: + intermediate_samples.append(samples) + + if save_intermediate: + return torch.cat(intermediate_samples, dim=0) + else: + return samples diff --git a/sbi/simulators/linear_gaussian.py b/sbi/simulators/linear_gaussian.py index 985fa0eae..251b807b0 100644 --- a/sbi/simulators/linear_gaussian.py +++ b/sbi/simulators/linear_gaussian.py @@ -50,7 +50,7 @@ def linear_gaussian( Returns: Simulated data. """ - + theta = torch.as_tensor(theta) # Must be a tensor if num_discarded_dims: theta = theta[:, :-num_discarded_dims] diff --git a/sbi/utils/__init__.py b/sbi/utils/__init__.py index 621758232..93a301580 100644 --- a/sbi/utils/__init__.py +++ b/sbi/utils/__init__.py @@ -69,3 +69,4 @@ validate_theta_and_x, ) from sbi.utils.user_input_checks_utils import MultipleIndependent +from sbi.utils.get_nn_models import posterior_nn, likelihood_nn, classifier_nn diff --git a/sbi/utils/metrics.py b/sbi/utils/metrics.py index 1e9c148c2..7de4a67cd 100644 --- a/sbi/utils/metrics.py +++ b/sbi/utils/metrics.py @@ -99,6 +99,8 @@ def c2st( X_std = torch.std(X, dim=0) # Set std to 1 if it is close to zero. X_std[X_std < 1e-14] = 1 + assert not torch.any(torch.isnan(X_mean)), "X_mean contains NaNs" + assert not torch.any(torch.isnan(X_std)), "X_std contains NaNs" X = (X - X_mean) / X_std Y = (Y - X_mean) / X_std diff --git a/sbi/utils/user_input_checks.py b/sbi/utils/user_input_checks.py index 44d87f5c6..1f093a260 100644 --- a/sbi/utils/user_input_checks.py +++ b/sbi/utils/user_input_checks.py @@ -748,7 +748,10 @@ def test_posterior_net_for_multi_d_x(net, theta: Tensor, x: Tensor) -> None: """ try: # torch.nn.functional needs at least two inputs here. 
- net.log_prob(theta[:, :2], condition=x[:2]) + if hasattr(net, "log_prob"): + # This only is checked for density estimators, not for classifiers and + # others + net.log_prob(theta[:, :2], condition=x[:2]) except RuntimeError as rte: ndims = x.ndim diff --git a/tests/density_estimator_test.py b/tests/density_estimator_test.py index 1431bdaac..4e4f76cd7 100644 --- a/tests/density_estimator_test.py +++ b/tests/density_estimator_test.py @@ -12,10 +12,10 @@ from sbi.neural_nets import build_mnle from sbi.neural_nets.categorial import build_categoricalmassestimator -from sbi.neural_nets.density_estimators.shape_handling import ( +from sbi.neural_nets.embedding_nets import CNNEmbedding +from sbi.neural_nets.estimators.shape_handling import ( reshape_to_sample_batch_event, ) -from sbi.neural_nets.embedding_nets import CNNEmbedding from sbi.neural_nets.flow import ( build_maf, build_maf_rqs, diff --git a/tests/lc2st_test.py b/tests/lc2st_test.py index 7dbed8aca..32c0c668b 100644 --- a/tests/lc2st_test.py +++ b/tests/lc2st_test.py @@ -197,7 +197,7 @@ def test_lc2st_true_positiv_rate(method): # good estimator: big training and num_epochs = accept # (convergence of the estimator) - num_train = 10_000 + num_train = 5_000 num_epochs = 200 num_cal = 1_000 diff --git a/tests/linearGaussian_npse_test.py b/tests/linearGaussian_npse_test.py new file mode 100644 index 000000000..ca3156f11 --- /dev/null +++ b/tests/linearGaussian_npse_test.py @@ -0,0 +1,237 @@ +from typing import List + +import pytest +import torch +from torch import eye, ones, zeros +from torch.distributions import MultivariateNormal + +from sbi import analysis as analysis +from sbi import utils as utils +from sbi.inference import NPSE +from sbi.simulators import linear_gaussian +from sbi.simulators.linear_gaussian import ( + samples_true_posterior_linear_gaussian_mvn_prior_different_dims, + samples_true_posterior_linear_gaussian_uniform_prior, + true_posterior_linear_gaussian_mvn_prior, +) + +from .test_utils import check_c2st, get_dkl_gaussian_prior + + +# We always test num_dim and sample_with with defaults and mark the rests as slow. 
+@pytest.mark.parametrize( + "sde_type, num_dim, prior_str, sample_with", + [ + ("vp", 1, "gaussian", ["sde", "ode"]), + ("vp", 3, "uniform", ["sde", "ode"]), + ("vp", 3, "gaussian", ["sde", "ode"]), + ("ve", 3, "uniform", ["sde", "ode"]), + ("subvp", 3, "uniform", ["sde", "ode"]), + ], +) +def test_c2st_npse_on_linearGaussian( + sde_type, num_dim: int, prior_str: str, sample_with: List[str] +): + """Test whether NPSE infers well a simple example with available ground truth.""" + + x_o = zeros(1, num_dim) + num_samples = 1000 + num_simulations = 10_000 + + # likelihood_mean will be likelihood_shift+theta + likelihood_shift = -1.0 * ones(num_dim) + likelihood_cov = 0.3 * eye(num_dim) + + if prior_str == "gaussian": + prior_mean = zeros(num_dim) + prior_cov = eye(num_dim) + prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov) + gt_posterior = true_posterior_linear_gaussian_mvn_prior( + x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov + ) + target_samples = gt_posterior.sample((num_samples,)) + else: + prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim)) + target_samples = samples_true_posterior_linear_gaussian_uniform_prior( + x_o, + likelihood_shift, + likelihood_cov, + prior=prior, + num_samples=num_samples, + ) + + inference = NPSE(prior, sde_type=sde_type, show_progress_bars=True) + + theta = prior.sample((num_simulations,)) + x = linear_gaussian(theta, likelihood_shift, likelihood_cov) + + score_estimator = inference.append_simulations(theta, x).train( + training_batch_size=100 + ) + # amortize the training when testing sample_with. + for method in sample_with: + posterior = inference.build_posterior(score_estimator, sample_with=method) + posterior.set_default_x(x_o) + samples = posterior.sample((num_samples,)) + + # Compute the c2st and assert it is near chance level of 0.5. + check_c2st( + samples, + target_samples, + alg=f"npse-{sde_type or 'vp'}-{prior_str}-{num_dim}D-{method}", + ) + + # Checks for log_prob() + if prior_str == "gaussian": + # For the Gaussian prior, we compute the KLd between ground truth and + # posterior. + dkl = get_dkl_gaussian_prior( + posterior, + x_o[0], + likelihood_shift, + likelihood_cov, + prior_mean, + prior_cov, + ) + + max_dkl = 0.15 + + assert ( + dkl < max_dkl + ), f"D-KL={dkl} is more than 2 stds above the average performance." + + +def test_c2st_npse_on_linearGaussian_different_dims(): + """Test SNPE on linear Gaussian with different theta and x dimensionality.""" + + theta_dim = 3 + x_dim = 2 + discard_dims = theta_dim - x_dim + + x_o = zeros(1, x_dim) + num_samples = 1000 + num_simulations = 2000 + + # likelihood_mean will be likelihood_shift+theta + likelihood_shift = -1.0 * ones(x_dim) + likelihood_cov = 0.3 * eye(x_dim) + + prior_mean = zeros(theta_dim) + prior_cov = eye(theta_dim) + prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov) + target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims( + x_o, + likelihood_shift, + likelihood_cov, + prior_mean, + prior_cov, + num_discarded_dims=discard_dims, + num_samples=num_samples, + ) + + def simulator(theta): + return linear_gaussian( + theta, + likelihood_shift, + likelihood_cov, + num_discarded_dims=discard_dims, + ) + + # Test whether prior can be `None`. + inference = NPSE(prior=None) + + theta = prior.sample((num_simulations,)) + x = simulator(theta) + + # Test whether we can stop and resume. 
+ inference.append_simulations(theta, x).train( + max_num_epochs=10, training_batch_size=100 + ) + inference.train( + resume_training=True, force_first_round_loss=True, training_batch_size=100 + ) + posterior = inference.build_posterior().set_default_x(x_o) + samples = posterior.sample((num_samples,)) + + # Compute the c2st and assert it is near chance level of 0.5. + check_c2st(samples, target_samples, alg="npse_different_dims_and_resume_training") + + +@pytest.mark.xfail( + reason="iid_bridge not working.", + raises=NotImplementedError, + strict=True, + match="Score accumulation*", +) +@pytest.mark.parametrize("num_trials", [2, 10]) +def test_npse_iid_inference(num_trials): + """Test whether NPSE infers well a simple example with available ground truth.""" + + num_dim = 2 + x_o = zeros(num_trials, num_dim) + num_samples = 1000 + num_simulations = 3000 + + # likelihood_mean will be likelihood_shift+theta + likelihood_shift = -1.0 * ones(num_dim) + likelihood_cov = 0.3 * eye(num_dim) + + prior_mean = zeros(num_dim) + prior_cov = eye(num_dim) + prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov) + gt_posterior = true_posterior_linear_gaussian_mvn_prior( + x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov + ) + target_samples = gt_posterior.sample((num_samples,)) + + inference = NPSE(prior, show_progress_bars=True) + + theta = prior.sample((num_simulations,)) + x = linear_gaussian(theta, likelihood_shift, likelihood_cov) + + score_estimator = inference.append_simulations(theta, x).train( + training_batch_size=100, + ) + posterior = inference.build_posterior(score_estimator) + posterior.set_default_x(x_o) + samples = posterior.sample((num_samples,)) + + # Compute the c2st and assert it is near chance level of 0.5. + check_c2st( + samples, target_samples, alg=f"npse-vp-gaussian-1D-{num_trials}iid-trials" + ) + + +@pytest.mark.slow +@pytest.mark.xfail( + raises=NotImplementedError, + reason="MAP optimization via score not working accurately.", +) +def test_npse_map(): + num_dim = 2 + x_o = zeros(num_dim) + num_simulations = 3000 + + # likelihood_mean will be likelihood_shift+theta + likelihood_shift = -1.0 * ones(num_dim) + likelihood_cov = 0.3 * eye(num_dim) + + prior_mean = zeros(num_dim) + prior_cov = eye(num_dim) + prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov) + gt_posterior = true_posterior_linear_gaussian_mvn_prior( + x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov + ) + inference = NPSE(prior, show_progress_bars=True) + + theta = prior.sample((num_simulations,)) + x = linear_gaussian(theta, likelihood_shift, likelihood_cov) + + inference.append_simulations(theta, x).train( + training_batch_size=100, max_num_epochs=10 + ) + posterior = inference.build_posterior().set_default_x(x_o) + + map_ = posterior.map(show_progress_bars=True) + + assert torch.allclose(map_, gt_posterior.mean, atol=0.2), "MAP is not close to GT." 
diff --git a/tests/linearGaussian_snpe_test.py b/tests/linearGaussian_snpe_test.py index e4dcfd9ab..2eeeb4d7e 100644 --- a/tests/linearGaussian_snpe_test.py +++ b/tests/linearGaussian_snpe_test.py @@ -30,6 +30,7 @@ true_posterior_linear_gaussian_mvn_prior, ) from sbi.utils import RestrictedPrior, get_density_thresholder +from sbi.utils.user_input_checks import process_prior, process_simulator from .sbiutils_test import conditional_of_mvn from .test_utils import ( @@ -156,7 +157,7 @@ def test_density_estimators_on_linearGaussian(density_estimator): x_o = zeros(1, x_dim) num_samples = 1000 - num_simulations = 2000 + num_simulations = 2500 # likelihood_mean will be likelihood_shift+theta likelihood_shift = -1.0 * ones(x_dim) @@ -477,6 +478,8 @@ def simulator(theta): return linear_gaussian(theta, likelihood_shift, likelihood_cov) inference = SNPE_C(prior, show_progress_bars=False) + prior, _, prior_returns_numpy = process_prior(prior) + simulator = process_simulator(simulator, prior, prior_returns_numpy) proposal = prior for _ in range(2): diff --git a/tests/posterior_nn_test.py b/tests/posterior_nn_test.py index 10ecf5490..8f42a2dfb 100644 --- a/tests/posterior_nn_test.py +++ b/tests/posterior_nn_test.py @@ -32,7 +32,13 @@ ( 0, 1, - pytest.param(2, marks=pytest.mark.xfail(raises=AssertionError)), + pytest.param( + 2, + marks=pytest.mark.xfail( + raises=AssertionError, + reason=".log_prob() supports only batch size 1 for x_o.", + ), + ), ), ) def test_log_prob_with_different_x(snpe_method: type, x_o_batch_dim: bool): diff --git a/tests/sbc_test.py b/tests/sbc_test.py index 42940d6b5..ea611500a 100644 --- a/tests/sbc_test.py +++ b/tests/sbc_test.py @@ -12,12 +12,9 @@ from sbi.analysis import sbc_rank_plot from sbi.diagnostics import check_sbc, get_nltp, run_sbc -from sbi.inference import SNLE, SNPE, simulate_for_sbi -from sbi.simulators.linear_gaussian import ( - linear_gaussian, -) +from sbi.inference import NPSE, SNLE, SNPE +from sbi.simulators.linear_gaussian import linear_gaussian from sbi.utils import BoxUniform, MultipleIndependent -from sbi.utils.user_input_checks import process_prior, process_simulator from tests.test_utils import PosteriorPotential, TractablePosterior @@ -29,11 +26,10 @@ (SNPE, None), pytest.param(SNLE, "mcmc", marks=pytest.mark.mcmc), pytest.param(SNLE, "vi", marks=pytest.mark.mcmc), + (NPSE, None), ), ) -def test_running_sbc( - method, prior, reduce_fn_str, sampler, mcmc_params_accurate: dict, model="mdn" -): +def test_running_sbc(method, prior, reduce_fn_str, sampler, mcmc_params_accurate: dict): """Tests running inference and then SBC and obtaining nltp.""" num_dim = 2 @@ -53,18 +49,12 @@ def test_running_sbc( likelihood_shift = -1.0 * ones(num_dim) likelihood_cov = 0.3 * eye(num_dim) - def simulator(theta): - return linear_gaussian(theta, likelihood_shift, likelihood_cov) - - inferer = method(prior, show_progress_bars=False, density_estimator=model) + theta = prior.sample((num_simulations,)) + x = linear_gaussian(theta, likelihood_shift, likelihood_cov) - prior, _, prior_returns_numpy = process_prior(prior) - simulator = process_simulator(simulator, prior, prior_returns_numpy) - theta, x = simulate_for_sbi(simulator, prior, num_simulations) + inferer = method(prior, show_progress_bars=False) - _ = inferer.append_simulations(theta, x).train( - training_batch_size=100, max_num_epochs=max_num_epochs - ) + inferer.append_simulations(theta, x).train(max_num_epochs=max_num_epochs) if method == SNLE: posterior_kwargs = { "sample_with": "mcmc" if sampler == "mcmc" else 
"vi", @@ -77,7 +67,7 @@ def simulator(theta): posterior = inferer.build_posterior(**posterior_kwargs) thetas = prior.sample((num_sbc_runs,)) - xs = simulator(thetas) + xs = linear_gaussian(thetas, likelihood_shift, likelihood_cov) reduce_fn = "marginals" if reduce_fn_str == "marginals" else posterior.log_prob run_sbc( diff --git a/tests/score_estimator_test.py b/tests/score_estimator_test.py new file mode 100644 index 000000000..ef03b6275 --- /dev/null +++ b/tests/score_estimator_test.py @@ -0,0 +1,146 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see + +from __future__ import annotations + +from typing import Tuple + +import pytest +import torch + +from sbi.neural_nets.embedding_nets import CNNEmbedding +from sbi.neural_nets.score_nets import build_score_estimator + + +@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"]) +@pytest.mark.parametrize("input_sample_dim", (1, 2)) +@pytest.mark.parametrize("input_event_shape", ((1,), (4,))) +@pytest.mark.parametrize("condition_event_shape", ((1,), (7,))) +@pytest.mark.parametrize("batch_dim", (1, 10)) +@pytest.mark.parametrize("score_net", ["mlp", "ada_mlp"]) +def test_score_estimator_loss_shapes( + sde_type, + input_sample_dim, + input_event_shape, + condition_event_shape, + batch_dim, + score_net, +): + """Test whether `loss` of DensityEstimators follow the shape convention.""" + score_estimator, inputs, conditions = _build_score_estimator_and_tensors( + sde_type, + input_event_shape, + condition_event_shape, + batch_dim, + input_sample_dim, + score_net=score_net, + ) + + losses = score_estimator.loss(inputs[0], condition=conditions) + assert losses.shape == (batch_dim,) + + +@pytest.mark.gpu +@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"]) +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_score_estimator_on_device(sde_type, device): + """Test whether DensityEstimators can be moved to the device.""" + score_estimator = build_score_estimator( + torch.randn(100, 1), torch.randn(100, 1), sde_type=sde_type + ) + score_estimator.to(device) + + # Test forward + inputs = torch.randn(100, 1, device=device) + condition = torch.randn(100, 1, device=device) + time = torch.randn(1, device=device) + out = score_estimator(inputs, condition, time) + + assert str(out.device).split(":")[0] == device, "Output device mismatch." + + # Test loss + loss = score_estimator.loss(inputs, condition) + assert str(loss.device).split(":")[0] == device, "Loss device mismatch." + + +@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"]) +@pytest.mark.parametrize("input_sample_dim", (1, 2)) +@pytest.mark.parametrize("input_event_shape", ((1,), (4,))) +@pytest.mark.parametrize("condition_event_shape", ((1,), (7,))) +@pytest.mark.parametrize("batch_dim", (1, 10)) +@pytest.mark.parametrize("score_net", ["mlp", "ada_mlp"]) +def test_score_estimator_forward_shapes( + sde_type, + input_sample_dim, + input_event_shape, + condition_event_shape, + batch_dim, + score_net, +): + """Test whether `forward` of DensityEstimators follow the shape convention.""" + score_estimator, inputs, conditions = _build_score_estimator_and_tensors( + sde_type, + input_event_shape, + condition_event_shape, + batch_dim, + input_sample_dim, + score_net=score_net, + ) + # Batched times + times = torch.rand((batch_dim,)) + outputs = score_estimator(inputs[0], condition=conditions, time=times) + assert outputs.shape == (batch_dim, *input_event_shape), "Output shape mismatch." 
+ + # Single time + time = torch.rand(()) + outputs = score_estimator(inputs[0], condition=conditions, time=time) + assert outputs.shape == (batch_dim, *input_event_shape), "Output shape mismatch." + + +def _build_score_estimator_and_tensors( + sde_type: str, + input_event_shape: Tuple[int], + condition_event_shape: Tuple[int], + batch_dim: int, + input_sample_dim: int = 1, + **kwargs, +): + """Helper function for all tests that deal with shapes of density estimators.""" + + # Use discrete thetas such that categorical density esitmators can also use them. + building_thetas = torch.randint( + 0, 4, (1000, *input_event_shape), dtype=torch.float32 + ) + building_xs = torch.randn((1000, *condition_event_shape)) + + if len(condition_event_shape) > 1: + embedding_net_y = CNNEmbedding(condition_event_shape, kernel_size=1) + else: + embedding_net_y = torch.nn.Identity() + + if len(input_event_shape) > 1: + embedding_net_x = CNNEmbedding(input_event_shape, kernel_size=1) + else: + embedding_net_x = torch.nn.Identity() + + score_estimator = build_score_estimator( + torch.randn_like(building_thetas), + torch.randn_like(building_xs), + sde_type=sde_type, + embedding_net_x=embedding_net_x, + embedding_net_y=embedding_net_y, + **kwargs, + ) + + inputs = building_thetas[:batch_dim] + condition = building_xs[:batch_dim] + + inputs = inputs.unsqueeze(0) + inputs = inputs.expand( + [ + input_sample_dim, + ] + + [-1] * (1 + len(input_event_shape)) + ) + condition = condition + return score_estimator, inputs, condition diff --git a/tests/score_samplers_test.py b/tests/score_samplers_test.py new file mode 100644 index 000000000..84af99224 --- /dev/null +++ b/tests/score_samplers_test.py @@ -0,0 +1,82 @@ +# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed +# under the Apache License Version 2.0, see + +from __future__ import annotations + +from typing import Tuple + +import pytest +import torch +from torch import Tensor + +from sbi.inference.potentials.score_based_potential import ( + score_estimator_based_potential, +) +from sbi.neural_nets.score_nets import build_score_estimator +from sbi.samplers.score.score import Diffuser + + +@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"]) +@pytest.mark.parametrize("predictor", ("euler_maruyama",)) +@pytest.mark.parametrize("corrector", (None,)) +@pytest.mark.parametrize("input_event_shape", ((1,), (4,))) +@pytest.mark.parametrize("mu", (-1.0, 0.0, 1.0)) +@pytest.mark.parametrize("std", (1.0, 0.1)) +def test_gaussian_score_sampling( + sde_type, predictor, corrector, input_event_shape, mu, std +): + mean0 = mu * torch.ones(input_event_shape) + std0 = std * torch.ones(input_event_shape) + + score_fn = _build_gaussian_score_estimator(sde_type, input_event_shape, mean0, std0) + + sampler = Diffuser(score_fn, predictor, corrector) + + t_min = score_fn.score_estimator.t_min + t_max = score_fn.score_estimator.t_max + ts = torch.linspace(t_max, t_min, 500) + samples = sampler.run(1_000, ts) + + mean_est = samples.mean(0) + std_est = samples.std(0) + + assert torch.allclose(mean_est, mean0, atol=1e-1) + assert torch.allclose(std_est, std0, atol=1e-1) + + +def _build_gaussian_score_estimator( + sde_type: str, + input_event_shape: Tuple[int], + mean0: Tensor, + std0: Tensor, +): + """Helper function for all tests that deal with shapes of density estimators.""" + + # Use discrete thetas such that categorical density esitmators can also use them. 
+ building_thetas = ( + torch.randn((1000, *input_event_shape), dtype=torch.float32) * std0 + mean0 + ) + building_xs = torch.ones((1000, 1)) + + # Note the precondition predicts a correct Gaussian score by default if the neural + # net predicts 0! + class DummyNet(torch.nn.Module): + def __init__(self): + super().__init__() + self.dummy_param_for_device_detection = torch.nn.Linear(1, 1) + + def forward(self, input, condition, time): + return torch.zeros_like(input) + + score_estimator = build_score_estimator( + building_thetas, + building_xs, + sde_type=sde_type, + score_net=DummyNet(), + ) + + score_fn, _ = score_estimator_based_potential( + score_estimator, prior=None, x_o=torch.ones((1,)) + ) + + return score_fn diff --git a/tests/test_utils.py b/tests/test_utils.py index a1cea1e07..8f750f741 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -146,7 +146,6 @@ def check_c2st(x: Tensor, y: Tensor, alg: str, tol: float = 0.1) -> None: chance.""" score = c2st(x, y).item() - print(f"c2st for {alg} is {score:.2f}.") assert ( diff --git a/tutorials/16_implemented_methods.ipynb b/tutorials/16_implemented_methods.ipynb index f26bf7ad3..8f7dda6f5 100644 --- a/tutorials/16_implemented_methods.ipynb +++ b/tutorials/16_implemented_methods.ipynb @@ -72,7 +72,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 60 epochs." + " Neural network successfully converged after 83 epochs." ] }, { @@ -93,7 +93,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 85 epochs." + " Neural network successfully converged after 27 epochs." ] } ], @@ -128,7 +128,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 116 epochs." + " Neural network successfully converged after 304 epochs." ] }, { @@ -150,7 +150,7 @@ "output_type": "stream", "text": [ "Using SNPE-C with atomic loss\n", - " Neural network successfully converged after 56 epochs." + " Neural network successfully converged after 40 epochs." ] } ], @@ -193,7 +193,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 142 epochs." + " Neural network successfully converged after 129 epochs." ] }, { @@ -241,7 +241,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 258 epochs." + " Neural network successfully converged after 180 epochs." ] }, { @@ -262,10 +262,10 @@ "name": "stdout", "output_type": "stream", "text": [ - "The `RestrictedPrior` rejected 40.8%\n", + "The `RestrictedPrior` rejected 44.8%\n", " of prior samples. You will get a speed-up of\n", - " 69.0%.\n", - " Neural network successfully converged after 43 epochs." + " 81.2%.\n", + " Neural network successfully converged after 34 epochs." ] }, { @@ -299,6 +299,78 @@ " proposal = RestrictedPrior(prior, accept_reject_fn, sample_with=\"rejection\")" ] }, + { + "cell_type": "markdown", + "id": "d4379824-e775-46ad-946b-07cfc3ff4c43", + "metadata": {}, + "source": [ + "**Flow Matching for Scalable Simulation-Based Inference**
by Dax, Wildberger, Buchholz, Green, Macke,\n", + "Schölkopf (NeurIPS 2023)
[[Paper]](https://arxiv.org/abs/2305.17161)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "2922328f-2d31-48c8-8ba4-0e0a40e5b308", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Neural network successfully converged after 93 epochs." + ] + } + ], + "source": [ + "from sbi.inference import FMPE\n", + "\n", + "inference = FMPE(prior)\n", + "# FMPE does support multiple rounds of inference\n", + "theta = prior.sample((num_sims,))\n", + "x = simulator(theta)\n", + "inference.append_simulations(theta, x).train()\n", + "posterior = inference.build_posterior().set_default_x(x_o)" + ] + }, + { + "cell_type": "markdown", + "id": "4ad583ea-e140-4cf5-89eb-eb77292c77c3", + "metadata": {}, + "source": [ + "**Neural posterior score estimation**
\n", + "based on:\n", + "- Geffner, T., Papamakarios, G., & Mnih, A. Compositional score modeling for simulation-based inference. ICML 2023.\n", + "- Sharrock, L., Simons, J., Liu, S., & Beaumont, M.. Sequential neural score estimation: Likelihood-free inference with conditional score based diffusion models. arXiv preprint arXiv:2210.04872. ICML 2024.\n", + " \n", + "Note that currently only the single-round variant is implemented." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d1e49c3f-a16d-4e79-ad0b-2fb4cc9ce527", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Neural network successfully converged after 659 epochs." + ] + } + ], + "source": [ + "from sbi.inference import NPSE\n", + "\n", + "theta = prior.sample((num_sims,))\n", + "x = simulator(theta)\n", + "\n", + "inference = NPSE(prior, sde_type=\"ve\")\n", + "_ = inference.append_simulations(theta, x).train()\n", + "posterior = inference.build_posterior().set_default_x(x_o)" + ] + }, { "cell_type": "markdown", "id": "d13f84e2-d35a-4f54-8cbf-0e4be1a38fb3", @@ -317,7 +389,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "id": "d4430dbe-ac60-4978-9695-d0a5b317ee57", "metadata": {}, "outputs": [ @@ -325,7 +397,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 146 epochs." + " Neural network successfully converged after 68 epochs." ] }, { @@ -346,7 +418,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 55 epochs." + " Neural network successfully converged after 24 epochs." ] } ], @@ -375,7 +447,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "id": "d284d6c5-e6f6-4b1d-9c15-d6fa1736a10e", "metadata": {}, "outputs": [ @@ -383,7 +455,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 55 epochs." + " Neural network successfully converged after 167 epochs." ] }, { @@ -405,9 +477,9 @@ "output_type": "stream", "text": [ "\n", - "Converged with loss: 0.04\n", - "Quality Score: 0.192 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n", - " Neural network successfully converged after 86 epochs." + "Converged with loss: 0.03\n", + "Quality Score: 0.108 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n", + " Neural network successfully converged after 25 epochs." ] }, { @@ -429,8 +501,8 @@ "output_type": "stream", "text": [ "\n", - "Converged with loss: 0.0\n", - "Quality Score: -0.034 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n" + "Converged with loss: 0.01\n", + "Quality Score: 0.077 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n" ] } ], @@ -490,7 +562,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "id": "b58c3609-7bd7-40ce-a154-f72a190da2ef", "metadata": {}, "outputs": [ @@ -498,7 +570,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 52 epochs." + " Neural network successfully converged after 76 epochs." 
] } ], @@ -522,7 +594,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "id": "e36ab4e7-713f-4ff2-b467-8b481a149861", "metadata": {}, "outputs": [ @@ -530,7 +602,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 70 epochs." + " Neural network successfully converged after 68 epochs." ] }, { @@ -551,7 +623,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 39 epochs." + " Neural network successfully converged after 46 epochs." ] } ], @@ -580,7 +652,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "id": "85e6cf8c", "metadata": {}, "outputs": [ @@ -588,7 +660,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 181 epochs." + " Neural network successfully converged after 228 epochs." ] } ], @@ -612,7 +684,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "id": "1ec55e76-dd86-46d1-a7cc-643324488820", "metadata": {}, "outputs": [ @@ -620,7 +692,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 92 epochs." + " Neural network successfully converged after 83 epochs." ] } ], @@ -641,20 +713,9 @@ "posterior = inference.build_posterior().set_default_x(x_o)" ] }, - { - "cell_type": "markdown", - "id": "75296db0", - "metadata": {}, - "source": [ - "## Flow Matching Posterior Estimation\n", - "\n", - "**Flow Matching for Scalable Simulation-Based Inference**
by Dax, Wildberger, Buchholz, Green, Macke,\n", - "Schölkopf (NeurIPS 2023)
[[Paper]](https://arxiv.org/abs/2305.17161)" - ] - }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "id": "a5fc6047", "metadata": {}, "outputs": [ @@ -662,7 +723,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 66 epochs." + " Neural network successfully converged after 38 epochs." ] } ], @@ -695,7 +756,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 16, "id": "7066ef9b-0e3d-44d3-a80e-5e06de7845ce", "metadata": {}, "outputs": [ @@ -703,13 +764,13 @@ "name": "stdout", "output_type": "stream", "text": [ - " Neural network successfully converged after 164 epochs." + " Neural network successfully converged after 148 epochs." ] }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "3ad7dfa6a28347d6945466458090c49b", + "model_id": "", "version_major": 2, "version_minor": 0 }, @@ -723,7 +784,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "fc4353a74b0f498d886df56e7db4ebe2", + "model_id": "", "version_major": 2, "version_minor": 0 }, @@ -736,7 +797,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAHACAYAAAAyfdnSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAvt0lEQVR4nO3dd3RUdf7/8ddMeoAk1BQSIEoMEUOIoICA4IoCooKyyrqsgBtEhHwRKZZFaYIUwVVcRFYXsK2CBSwUQRE5RGWFNYj0EsoR2FjoECbJfH5/+GNkJEACmUySz/NxTs5h7v3Mve+5N/Phlc9tDmOMEQAAAKzh9HcBAAAAKFsEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwTGBxGrndbu3bt0/VqlWTw+HwdU0ALGSM0dGjRxUXFyens/L9bUo/CsDXStKPFisA7tu3TwkJCaVSHACcz969exUfH+/vMkod/SiAslKcfrRYAbBatWqeBUZERFx6ZQDwO0eOHFFCQoKnv6ls6EcB+FpJ+tFiBcDThysiIiLouAD4VGU9PEo/CqCsFKcfrXwn2gAAAOC8CIAAAACWIQACAABYpljnABaHMUYFBQUqLCwsrUUC+P8CAgIUGBhYac+PAwCUrVIJgC6XS/v379eJEydKY3EAihAeHq7Y2FgFBwf7uxQAQAV3yQHQ7XYrJydHAQEBiouLU3BwMKMUQCkyxsjlcunHH39UTk6OkpKSKuWNkgEAZeeSA6DL5ZLb7VZCQoLCw8NLoyYAvxMWFqagoCDt3r1bLpdLoaGh/i4JAFCBldowAiMSgG/xHQMAlBb+RwEAALAMARAAAMAyBMAi9OnTR926dfN3GQAAAD5RavcBLMrKrl2L3fb6Dz7wYSXl2+jRo7VgwQJlZ2eft92GDRs0cuRIrV27Vrt379bf//53DR48uExqBAAAlUelGQF0uVz+LsHnTpw4ocsuu0wTJ05UTEyMv8sBAAAVVIUNgO3bt1dmZqYGDx6sWrVqqWPHjpKkZ599VqmpqapSpYoSEhI0YMAAHTt2zPO+OXPmKCoqSp988olSUlJUtWpVderUSfv37z/nur755hvVrl1bkyZNKnK+y+VSZmamYmNjFRoaqvr162vChAme+YcOHVLfvn1Vu3ZtRURE6A9/+IPWrVvnqWfMmDFat26dHA6HHA6H5syZU+R6rrnmGj3zzDP605/+pJCQkJJuMgAAAEk+PgTsa6+++qoefPBBZWVleaY5nU5NmzZNiYmJ2rlzpwYMGKBHHnlEL774oqfNiRMnNGXKFL3++utyOp36y1/+omHDhunNN988ax3Lly/XnXfeqcmTJ6tfv35F1jFt2jR9+OGHmjdvnurVq6e9e/dq7969nvl33XWXwsLCtHjxYkVGRmrmzJm68cYbtXXrVvXo0UPff/+9lixZok8//VSSFBkZWVqbCIBFTt+XFUD55nQ6/f5UpwodAJOSkjR58mSvaWeeE9egQQONGzdO/fv39wqA+fn5eumll3T55ZdLkjIzMzV27Nizlj9//nz16tVLr7zyinr06HHOOvbs2aOkpCS1adNGDodD9evX98xbtWqV/vOf/yg3N9czajdlyhQtWLBA7777rvr166eqVasqMDCQw7q4ZCU571ay+9zbysblcmnz5s06deqUv0sBcAEhISFq1KiRX0NghQ6AzZo1O2vap59+qgkTJmjz5s06cuSICgoKlJeXpxMnTnieVBIeHu4Jf5IUGxur3Nxcr+WsXr1aH3/8sd59990LXhHcp08f3XTTTUpOTlanTp1066236uabb5YkrVu3TseOHVPNmjW93nPy5Ent2LHjYj42AJzF7Xbr1KlTCgwMVGBghe7agUqtoKBAp06d8vtofYXuJapUqeL1eteuXbr11lv14IMPavz48apRo4ZWrVqljIwMuVwuTwAMCgryep/D4ZAxxmva5Zdfrpo1a2rWrFnq0qXLWe8509VXX62cnBwtXrxYn376qe6++2516NBB7777ro4dO6bY2FitWLHirPdFRUVd3AcHgHMIDAz0+6ElAOdXUFDg7xIqdgD8vbVr18rtdmvq1Kmex2bNmzfvopZVq1Ytvf/++2rfvr3uvvtuzZs377whMCIiQj169FCPHj30xz/+UZ06ddIvv/yiq6++WgcOHFBgYKAaNGhQ5HuDg4NVWFh4UXUCAACUVIW9CrgoDRs2VH5+vl544QXt3LlTr7/+ul566aWLXl6dOnW0fPlybd68Wffcc885E/uzzz6rt956S5s3b9bWrVv1zjvvKCYmRlFRUerQoYNatWqlbt26aenSpdq1a5e+/PJLjRgxQmvWrJH067mKOTk5ys7O1k8//XTOc3hcLpeys7OVn
Z0tl8ulH374QdnZ2dq+fftFf0YAAGAfn44AlvUJ5mlpaXr22Wc1adIkPf7447r++us1YcIE9erV66KXGRMTo+XLl6t9+/bq2bOn/v3vfysgIMCrTbVq1TR58mRt27ZNAQEBuuaaa7Ro0SLPKOSiRYs0YsQI3Xffffrxxx8VExOj66+/XtHR0ZKk7t276/3339cNN9ygQ4cOafbs2erTp89Ztezbt0/p6eme11OmTNGUKVPUrl27Ig8xAwAAFMVhfn/yWxGOHDmiyMhIHT58WBEREV7z8vLylJOTo8TERIWGhvqsUMB2xfmuVeSrgM/Xz1QGvv58eXl5Wr9+vUJDQzkHECjHXC6X8vLylJqaWuq5qST9TKU6BAwAAIALIwACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGV8GgDz8/OVl5dXrJ/8/HxfllLhrVixQg6HQ4cOHSrzdY8ePVpNmzYt8/UWV1H1jR49WtHR0XI4HFqwYEGZ1rNr1y45HA5lZ2eX6XoBACgunz0KLj8/X1u2bNHJkyeL1T4sLEzJyckKCgryVUllbsWKFbrhhht08OBBRUVF+bucSmvYsGH6v//7P8/rTZs2acyYMZo/f75atmyp6tWrl2k9CQkJ2r9/v2rVqiWJ3wMAQPnjswBYWFiokydPKjAwUIGB519NQUGBTp48qcLCwkoVAPGb/Px8n+3bqlWrqmrVqp7XO3bskCR17dpVDofjopd7sTUHBAQoJibmotcLAICv+fwcwMDAQAUHB5/350IBsShut1sTJkxQYmKiwsLClJaWpnfffVeSZIxRhw4d1LFjR51+1PEvv/yi+Ph4jRw5UtJvh1QXLlyoJk2aKDQ0VC1bttT333/vtZ5Vq1apbdu2CgsLU0JCggYNGqTjx4975p86dUqPPvqoEhISFBISooYNG+pf//qXdu3apRtuuEGSVL16dTkcDvXp0+eCtZ+2aNEiXXHFFQoLC9MNN9ygXbt2nXd7FHXY8dChQ3I4HFqxYoXXZ/7ss8/UvHlzhYeH67rrrtOWLVu8ljVx4kRFR0erWrVqysjIUF5e3lnre+WVV5SSkqLQ0FA1atRIL7744lm1zJ07V+3atVNoaKjefPNNn9V85iHg0aNH67bbbpMkOZ1OTwB0u90aO3as4uPjFRISoqZNm2rJkiUXrLlPnz7q1q2bnn76aUVHRysqKkpjx45VQUGBhg8frho1aig+Pl6zZ88u8nOd6/fgtddeU82aNXXq1CmvbdKtWzfde++9Z20rAABKk89GAH1twoQJeuONN/TSSy8pKSlJK1eu1F/+8hfVrl1b7dq106uvvqrU1FRNmzZNDz30kPr376+6det6AuBpw4cP1/PPP6+YmBj97W9/02233aatW7cqKChIO3bsUKdOnTRu3DjNmjVLP/74ozIzM5WZmen5D79Xr1766quvNG3aNKWlpSknJ0c//fSTEhIS9N5776l79+7asmWLIiIiFBYWVqza9+7dqzvvvFMDBw5Uv379tGbNGg0dOrTUtt2IESM0depU1a5dW/3799df//pXZWVlSZLmzZun0aNHa/r06WrTpo1ef/11TZs2TZdddpnn/W+++aZGjhypf/zjH0pPT9e3336r+++/X1WqVFHv3r097R577DFNnTpV6enpl/zA6/PVfKZhw4apQYMGuu+++7R//37P9Oeff15Tp07VzJkzlZ6erlmzZun222/Xhg0blJSUdM6aV6xYoeXLlys+Pl4rV65UVlaWMjIy9OWXX+r666/X6tWrNXfuXD3wwAO66aabFB8f71XPuX4PgoODNWjQIH344Ye66667JEm5ublauHChli5deknbqiRWdu1aovbXf/CBjyoBAJSlChkAT506paefflqffvqpWrVqJUm67LLLtGrVKs2cOVPt2rVT3bp1NXPmTPXq1UsHDhzQokWL9O2335412jhq1CjddNNNkqRXX31V8fHxmj9/vu6++25NmDBBPXv21ODBgyVJSUlJmjZtmtq1a6cZM2Zoz549mjdvnpYtW6YOHTp46jitRo0akqQ6dep4zv0qTu0zZszQ5ZdfrqlTp0qSkpOTtX79ek2aNKlUtt/48ePVrl07Sb8Gni5duigvL0+hoaF67rnnlJGRoYyMDEnSuHHj9Omnn3qNAo4aNUpTp07VnXfeKUlKTEzUxo0bNXPmTK8AOHjwYE8bX9Z8pqpVq3q29ZmHYadMmaJHH31Uf/rTnyRJkyZN0ueff67nnntO06dPP2/NNWrU0LRp0+R0OpWcnKzJkyfrxIkT+tvf/iZJevzxxzVx4kStWrXKs/zTAgICivw9kKQ///nPmj17ticAvvHGG6pXr57at29/kVsJAIDiqZABcPv27Tpx4oQnuJ3mcrmUnp7ueX3XXXdp/vz5mjhxombMmOE10nPa6RAm/foffXJysjZt2iRJWrdunb777juvw5fGGLndbuXk5Gj9+vUKCAjwBJPSqn3Tpk1q0aLFOeu8VE2aNPH8OzY2VtKvo0/16tXTpk2b1L9//7PW/fnnn0uSjh8/rh07digjI0P333+/p01BQYEiIyO93te8efMyqflCjhw5on379ql169Ze01u3bq1169Z5TSuq5saNG8vp/O1siejoaF111VWe1wEBAapZs6Zyc3OL92H+v/vvv1/XXHONfvjhB9WtW1dz5sxRnz59Lum8RQAAiqNCBsBjx45JkhYuXKi6det6zQsJCfH8+8SJE1q7dq0CAgK0bdu2i1rPAw88oEGDBp01r169etq+fftFLVO6cO0ldTqgnD7nUdI5b61z5oUNZ54jVxyn63/55ZfPCqkBAQFer6tUqVIuai6Jomr+/YUgDoejyGklrSc9PV1paWl67bXXdPPNN2vDhg1auHBhyYsGyqEN48eXqH3jESN8VAmAolTIAHjllVcqJCREe/bsOe/o29ChQ+V0OrV48WLdcsst6tKli/7whz94tfn66689o0gHDx7U1q1blZKSIkm6+uqrtXHjRjVs2LDI5aempsrtduuLL77wHAI+U3BwsKRfr4guSe0pKSn68MMPz6rzfGrXri1J2r9/v2ck8WLuQ5eSkqLVq1erV69eRa47OjpacXFx2rlzp3r27Fni5fui5guJiIhQXFycsrKyvLZ5VlaWrr322lJf3+8V9XtwWt++ffXcc8/phx9+UIcOHZSQkODzegAAqJABsFq1aho2bJgefvhhud1utWnTRocPH1ZWVpYiIiLUu3dvLVy4ULNmzdJXX32lq6++WsOHD1fv3r313Xffed0XbuzYsapZs6aio6M1YsQI1apVS926dZMkPfroo2rZsqUyMzPVt29fValSRRs3btSyZcv0j3/8Qw0aNFDv3r3117/+1XMRyO7du5Wbm6u7775b9evXl8Ph0Mcff6xbbrlFYWFhxaq9f//+mjp1qoYPH66+fftq7dq1mjNnznm3SVhYmFq2bKmJEycqMTFRubm5euKJJ0q8bR966CH16dNHzZs3V+vWrfXmm29qw4YNXuc2jhkz
RoMGDVJkZKQ6deqkU6dOac2aNTp48KCGDBlS7HWVVs3FMXz4cI0aNUqXX365mjZtqtmzZys7O7vIq5NLW1G/B6dvW/PnP/9Zw4YN08svv6zXXnvN57UAACCVwW1gCgoK5HK5zvtTUFBQ4uU+9dRTevLJJzVhwgSlpKSoU6dOWrhwoRITE/Xjjz8qIyNDo0eP1tVXXy3p19ASHR191vltEydO1EMPPaRmzZrpwIED+uijjzwjNk2aNNEXX3yhrVu3qm3btkpPT9fIkSMVFxfnef+MGTP0xz/+UQMGDFCjRo10//33e24TU7duXY0ZM0aPPfaYoqOjlZmZecHapV8PL7/33ntasGCB0tLS9NJLL+npp5++4DaZNWuWCgoK1KxZMw0ePFjjxo0r8Xbt0aOHnnzyST3yyCNq1qyZdu/erQcffNCrTd++ffXKK69o9uzZSk1NVbt27TRnzhxP/SVRGjUXx6BBgzRkyBANHTpUqampWrJkiT788MMizwstbef6PZCkyMhIde/eXVWrVvX84QEAgK85zJknYJ3DkSNHFBkZqcOHDysiIsJrXl5ennJycpSYmOh1RWZ5fxIIT2dAeXHjjTeqcePGmjZt2nnbneu7dqaS3talpHx5G5jz9TOVga8/X15entavX6/Q0FDPH7H+xDmAQNFcLpfy8vKUmpp6ybdI+72S9DM+OwQcFBSk5OTkIs97KkpAQABPAYFVDh48qBUrVmjFihVeN9IGAMDXfHoOYFBQEKEOOIf09HQdPHhQkyZNUnJysr/LAQBYpEJeBFIa2rdvr2Ic/QZ85kKP9wMAwFd8fhEIAAAAyhcCIAAAgGVKLQByOBXwLb5jAIDScskB8PRFHidOnLjkYgCc2+nvGBdWAQAu1SVfBBIQEKCoqCjl5uZKksLDw3mYPVCKjDE6ceKEcnNzFRUVddYzlwEAKKlSuQo4JiZGkjwhEEDpi4qK8nzXAAC4FKUSAB0Oh2JjY1WnTh3l5+eXxiIBnCEoKIiRPwBAqSnV+wAGBATwnxQAAEA5x21gAAAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAypXofQABAxbBh/PgStW88YoSPKgEqvpJ8n5KGD/dhJcXHCCAAAIBlCIAAAACWKReHgFd27Vqi9td/8IGPKgEAAKj8GAEEAACwTLkYAQQAACgvSnqRVEVULgJgodMpU4L2eXl5PqsFqMgKnL4d1He5XAoODvbpOgAAvuf3AOhyufRjZKQKS/Af1/r1631YEVBx/a96dZ8uf/PmzWrUqBEhEAAqOL8HQLfbrUKnU05j5DTFGwcMDQ31cVVAxRTodvts2W6HQ6dOnZLbh+sAAJQNvwfA05zGKKCY/7Ew+gAUrbjfoYvi48PLAICyQ48OAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGXKzW1gAACXZvuMGQp0u317OyAAlQIjgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIZHwQEAcIYN48eXqH3jESN8VAngO4wAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGq4ABALAQVzvbjRFAAAAAyxAAAQAALEMABAAAsAwBEAAAwDJcBAIA8LuSXJDAxQgVAxeZlG+MAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhquAAQAXVNIrOuEftuwnrjC+dIwAAgAAWIYRQFRI/PUHAMDFYwQQAADAMgRAAAAAyxAAAQAALMM5gAAAoFKz5erokmAEEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMF4EAZYybWAPA2bhQo2wxAggAAGAZAiAAAIBlCIAAAACWIQACAABYpkJeBMJJ9LAJJ0YDAEpbhQyAAAB7MQgAXDoOAQMAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZbgPIHCJuFEzAKCiYQQQAADAMgRAAAAAyxAAAQAALEMABAAAsAwXgViMB6oDAGAnRgABAAAsQwAEAACwDIeAAQCVGvfqBM7GCCAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJbhUXAAAJRT5ekxduWpFlw6RgABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALMNVwAAAlBGupEV5wQggAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGW4ChgAgEvAlb2oiBgBBAAAsAwBEAAAwDIcAoYVSnqIpvGIET6qBAAA/2MEEAAAwDIEQAAAAMsQAAEAACxjxTmAvjz/i3PLKidu6wAAqMwYAQQAALAMARAAAMAyBEAAAADLEAABAAAsY8VFIBUZF5kAAIDSxgggAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYJ9HcB5dGG8eP9XcJF82XtJV124xEjfLZsAABw8RgBBAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALBMoL8LAACUHrfDITn52x4orwoKCvxdgiQCIABUCk6nUwFutwqdzl9DIIByqaCgQCEhIXL6+Q81AiAAVALBwcGqffiwjL8LAXBeqampcjqdCg4O9msdBEAAqCQC3G5/lwDgAkJDQ/1dgiQuAgEAALAOARAAAMAyHAIuYxvGj/d3CQAAwHKMAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACW4SIQ+AwXvAAAUD4xAggAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFgm0N8FnOZ2OCQneRQor9wOh79LAACUEr8HQKfTqQC3W4VOJ//BAOVcSEiInPyhBgAVnt8DYHBwsGofPizj70IAXFCjRo0UHBzs7zIAAJfI7wFQkgLcbn+XAKAYCH8AUDlwLAcAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALA
MARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALBMYHEaGWMkSUeOHPFJEcfz832yXACly1d9wJnLPt3fVDa+7kcl+lKgIigv/WixAuDRo0clSQkJCZdQFoAKLzLS56s4evSoIstgPWWNfhSApHLTjzpMMWKi2+3Wvn37VK1aNTkcjlIrUPo1rSYkJGjv3r2KiIgo1WXDf9ivlY+v96kxRkePHlVcXJyczsp3doov+1GJ71xlxD6tnHy5X0vSjxZrBNDpdCo+Pr5UijuXiIgIfsErIfZr5ePLfVoZR/5OK4t+VOI7VxmxTysnX+3X4vajle/PbAAAAJwXARAAAMAyfg+AISEhGjVqlEJCQvxdCkoR+7XyYZ+Wb+yfyod9WjmVl/1arItAAAAAUHn4fQQQAAAAZYsACAAAYBkCIAAAgGUIgAAAAJbxewCcPn26GjRooNDQULVo0UL/+c9//F0SzmH06NFyOBxeP40aNfLMz8vL08CBA1WzZk1VrVpV3bt31//+9z+vZezZs0ddunRReHi46tSpo+HDh6ugoKCsP4q1Vq5cqdtuu01xcXFyOBxasGCB13xjjEaOHKnY2FiFhYWpQ4cO2rZtm1ebX375RT179lRERISioqKUkZGhY8eOebX57rvv1LZtW4WGhiohIUGTJ0/29UezGv1oxUE/WjlUhr7UrwFw7ty5GjJkiEaNGqX//ve/SktLU8eOHZWbm+vPsnAejRs31v79+z0/q1at8sx7+OGH9dFHH+mdd97RF198oX379unOO+/0zC8sLFSXLl3kcrn05Zdf6tVXX9WcOXM0cuRIf3wUKx0/flxpaWmaPn16kfMnT56sadOm6aWXXtLq1atVpUoVdezYUXl5eZ42PXv21IYNG7Rs2TJ9/PHHWrlypfr16+eZf+TIEd18882qX7++1q5dq2eeeUajR4/WP//5T59/PhvRj1Y89KMVX6XoS40fXXvttWbgwIGe14WFhSYuLs5MmDDBj1XhXEaNGmXS0tKKnHfo0CETFBRk3nnnHc+0TZs2GUnmq6++MsYYs2jRIuN0Os2BAwc8bWbMmGEiIiLMqVOnfFo7zibJzJ8/3/Pa7XabmJgY88wzz3imHTp0yISEhJi33nrLGGPMxo0bjSTzzTffeNosXrzYOBwO88MPPxhjjHnxxRdN9erVvfbpo48+apKTk338iexEP1qx0I9WPhW1L/XbCKDL5dLatWvVoUMHzzSn06kOHTroq6++8ldZuIBt27YpLi5Ol112mXr27Kk9e/ZIktauXav8/Hyv/dmoUSPVq1fPsz+/+uorpaamKjo62tOmY8eOOnLkiDZs2FC2HwRnycnJ0YEDB7z2YWRkpFq0aOG1D6OiotS8eXNPmw4dOsjpdGr16tWeNtdff72Cg4M9bTp27KgtW7bo4MGDZfRp7EA/WjHRj1ZuFaUv9VsA/Omnn1RYWOj1SyxJ0dHROnDggJ+qwvm0aNFCc+bM0ZIlSzRjxgzl5OSobdu2Onr0qA4cOKDg4GBFRUV5vefM/XngwIEi9/fpefCv0/vgfN/JAwcOqE6dOl7zAwMDVaNGDfazH9CPVjz0o5VfRelLAy95CbBG586dPf9u0qSJWrRoofr162vevHkKCwvzY2UAUDHQj6K88NsIYK1atRQQEHDW1U3/+9//FBMT46eqUBJRUVG64oortH37dsXExMjlcunQoUNebc7cnzExMUXu79Pz4F+n98H5vpMxMTFnXVxQUFCgX375hf3sB/SjFR/9aOVTUfpSvwXA4OBgNWvWTJ999plnmtvt1meffaZWrVr5qyyUwLFjx7Rjxw7FxsaqWbNmCgoK8tqfW7Zs0Z49ezz7s1WrVlq/fr3XL/2yZcsUERGhK6+8sszrh7fExETFxMR47cMjR45o9erVXvvw0KFDWrt2rafN8uXL5Xa71aJFC0+blStXKj8/39Nm2bJlSk5OVvXq1cvo09iBfrTiox+tfCpMX1oql5JcpLffftuEhISYOXPmmI0bN5p+/fqZqKgor6ubUH4MHTrUrFixwuTk5JisrCzToUMHU6tWLZObm2uMMaZ///6mXr16Zvny5WbNmjWmVatWplWrVp73FxQUmKuuusrcfPPNJjs72yxZssTUrl3bPP744/76SNY5evSo+fbbb823335rJJlnn33WfPvtt2b37t3GGGMmTpxooqKizAcffGC+++4707VrV5OYmGhOnjzpWUanTp1Menq6Wb16tVm1apVJSkoy99xzj2f+oUOHTHR0tLn33nvN999/b95++20THh5uZs6cWeaf1wb0oxUL/WjlUBn6Ur8GQGOMeeGFF0y9evVMcHCwufbaa83XX3/t75JwDj169DCxsbEmODjY1K1b1/To0cNs377dM//kyZNmwIABpnr16iY8PNzccccdZv/+/V7L2LVrl+ncubMJCwsztWrVMkOHDjX5+fll/VGs9fnnnxtJZ/307t3bGPPr7QuefPJJEx0dbUJCQsyNN95otmzZ4rWMn3/+2dxzzz2matWqJiIiwtx3333m6NGjXm3WrVtn2rRpY0JCQkzdunXNxIkTy+ojWol+tOKgH60cKkNf6jDGmEsfRwQAAEBF4fdHwQEAAKBsEQABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwB0EKjR49W06ZN/V1GiT
Vo0EDPPffcJS1jzpw5ioqK8ryuqNsCgH9V1L6DfhSnEQArgBUrVsjhcJz1gPCLNWzYMK9nFNrMV9ti5cqVuu222xQXFyeHw6EFCxaU+joAFB/9qO/4altMmDBB11xzjapVq6Y6deqoW7du2rJlS6mvx1YEQIsYY1RQUKCqVauqZs2al7SsMx9OXRrt/KU0tkVRjh8/rrS0NE2fPr3Ulw3Af+hHz+arfvSLL77QwIED9fXXX2vZsmXKz8/XzTffrOPHj5f6umxEACwF7du3V2ZmpjIzMxUZGalatWrpySef1JlP2Tt48KB69eql6tWrKzw8XJ07d9a2bds883fv3q3bbrtN1atXV5UqVdS4cWMtWrRIu3bt0g033CBJql69uhwOh/r06SNJcrvdmjBhghITExUWFqa0tDS9++67nmWe/ot38eLFatasmUJCQrRq1aqzhuvdbrfGjh2r+Ph4hYSEqGnTplqyZIln/q5du+RwODR37ly1a9dOoaGhevPNN4vcFg6HQzNmzNDtt9+uKlWqaPz48SosLFRGRoanzuTkZD3//PNe7+vTp4+6deumKVOmKDY2VjVr1tTAgQPP2/G98sorioqKOu9fnnPmzFG9evUUHh6uO+64Qz///LPX/N9vi9N1PP3004qOjlZUVJTGjh2rgoICDR8+XDVq1FB8fLxmz559znVKUufOnTVu3Djdcccd520H4Ff0o7+hH/3VkiVL1KdPHzVu3FhpaWmaM2eO9uzZo7Vr1573fSimUnuqsMXatWtnqlatah566CGzefNm88Ybb5jw8HDzz3/+09Pm9ttvNykpKWblypUmOzvbdOzY0TRs2NC4XC5jjDFdunQxN910k/nuu+/Mjh07zEcffWS++OILU1BQYN577z0jyWzZssXs37/fHDp0yBhjzLhx40yjRo3MkiVLzI4dO8zs2bNNSEiIWbFihTHmt4dVN2nSxCxdutRs377d/Pzzz2bUqFEmLS3NU9uzzz5rIiIizFtvvWU2b95sHnnkERMUFGS2bt1qjDEmJyfHSDINGjQw7733ntm5c6fZt29fkdtCkqlTp46ZNWuW2bFjh9m9e7dxuVxm5MiR5ptvvjE7d+70bJ+5c+d63te7d28TERFh+vfvbzZt2mQ++uijs7Zh/fr1zd///ndjjDGTJk0yNWvWNKtXrz7nfvn666+N0+k0kyZNMlu2bDHPP/+8iYqKMpGRkZ42v98WvXv3NtWqVTMDBw40mzdvNv/617+MJNOxY0czfvx4s3XrVvPUU0+ZoKAgs3fv3vP8Vnhvk/nz5xerLWAr+tHf0I8Wbdu2bUaSWb9+fbHfg3MjAJaCdu3amZSUFON2uz3THn30UZOSkmKMMWbr1q1GksnKyvLM/+mnn0xYWJiZN2+eMcaY1NRUM3r06CKXf7oDOnjwoGdaXl6eCQ8PN19++aVX24yMDHPPPfd4vW/BggVebX7/ZY2LizPjx4/3anPNNdeYAQMGGGN+67iee+65C24LSWbw4MEXbDdw4EDTvXt3z+vevXub+vXrm4KCAs+0u+66y/To0cPz+nTH9cgjj5jY2Fjz/fffn3cd99xzj7nlllu8pvXo0eOCHVf9+vVNYWGhZ1pycrJp27at53VBQYGpUqWKeeutty74OY0hAALFQT/6G/rRsxUWFpouXbqY1q1bF6s9LiywDAcbK7WWLVvK4XB4Xrdq1UpTp05VYWGhNm3apMDAQLVo0cIzv2bNmkpOTtamTZskSYMGDdKDDz6opUuXqkOHDurevbuaNGlyzvVt375dJ06c0E033eQ13eVyKT093Wta8+bNz7mcI0eOaN++fWrdurXX9NatW2vdunXFXs6F2k2fPl2zZs3Snj17dPLkSblcrrOuGmvcuLECAgI8r2NjY7V+/XqvNlOnTtXx48e1Zs0aXXbZZeetY9OmTWcdgm3VqpXXYZmiNG7cWE7nb2dHREdH66qrrvK8DggIUM2aNZWbm3ve5QAoGfrR87ezuR8dOHCgvv/+e61atapY7XFhnANYTvTt21c7d+7Uvffeq/Xr16t58+Z64YUXztn+2LFjkqSFCxcqOzvb87Nx40av81ckqUqVKqVSY3GX8/t2b7/9toYNG6aMjAwtXbpU2dnZuu++++RyubzaBQUFeb12OBxyu91e09q2bavCwkLNmzfvIj5B8RRVR3FqA+Bf9KOVsx/NzMzUxx9/rM8//1zx8fGlWqfNCIClZPXq1V6vv/76ayUlJSkgIEApKSkqKCjwavPzzz9ry5YtuvLKKz3TEhIS1L9/f73//vsaOnSoXn75ZUlScHCwJKmwsNDT9sorr1RISIj27Nmjhg0bev0kJCQUu+6IiAjFxcUpKyvLa3pWVpZXbZciKytL1113nQYMGKD09HQ1bNhQO3bsuKhlXXvttVq8eLGefvppTZky5bxtU1JSitwvAMon+tFzs7EfNcYoMzNT8+fP1/Lly5WYmFgm67UFh4BLyZ49ezRkyBA98MAD+u9//6sXXnhBU6dOlSQlJSWpa9euuv/++zVz5kxVq1ZNjz32mOrWrauuXbtKkgYPHqzOnTvriiuu0MGDB/X5558rJSVFklS/fn05HA59/PHHuuWWWxQWFqZq1app2LBhevjhh+V2u9WmTRsdPnxYWVlZioiIUO/evYtd+/DhwzVq1Chdfvnlatq0qWbPnq3s7OxzXqFWUklJSXrttdf0ySefKDExUa+//rq++eabi/4yX3fddVq0aJE6d+6swMBADR48uMh2gwYNUuvWrTVlyhR17dpVn3zyyQUPW5SWY8eOafv27Z7XOTk5ys7OVo0aNVSvXr0yqQGoaOhHz83GfnTgwIH697//rQ8++EDVqlXTgQMHJEmRkZEKCwsrkxoqM0YAS0mvXr108uRJXXvttRo4cKAeeugh9evXzzN/9uzZatasmW699Va1atVKxhgtWrTIMyReWFiogQMHKiUlRZ06ddIVV1yhF198UZJUt25djRkzRo899piio6OVmZkpSXrqqaf05JNPasKECZ73LVy4sMQdwqBBgzRkyBANHTpUqampWrJkiT788EMlJSWVyrZ54IEHdOedd6pHjx5q0aKFfv75Zw0YMOCSltmmTRstXLhQTzzxxDkP8bRs2VIvv/yynn/+eaWlpWnp0qV64oknLmm9xbVmzRqlp6d7ziMaMmSI0tPTNXLkyDJZP1AR0Y+em4396IwZM3T48GG1b99esbGxnp+5c+eWyforO4cxZ9xkCRelffv2atq06SU/XgcAbEU/CpQtRgABAAAsQwAEAACwDIeAAQAALMMIIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZf4fknFmeJXkNz4AAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAHACAYAAAAyfdnSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAwlElEQVR4nO3de5xN9f7H8ffeM2bGMGNc52LcYjIa14iELic1SRKSI6dGOYnGUS6JyiXlUqFQISnqiC6USOIUKZf5RUS5XzIKKcUgc93f3x9lZzOYMbNnz8z39Xw85vGw9/rutT57rT1f7/mu9d3LYYwxAgAAgDWcvi4AAAAABYsACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACW8c9JI5fLpQMHDigkJEQOh8PbNQGwkDFGx48fV1RUlJzO4ve3Kf0oAG/LTT+aowB44MABValSJV+KA4AL2b9/v6Kjo31dRr6jHwVQUHLSj+YoAIaEhLhXGBoamvfKAOAsKSkpqlKliru/KW7oRwF4W2760RwFwNOnK0JDQ+m4AHhVcT09Sj8KoKDkpB8tfhfaAAAA4IIIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFjG39cFFEYr27fPcdtrFyzwYiUAkHP0XQByihFAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLFIo7gaSnp8vlcvm6DLdMZ85zcWpqqhcrAQoXp9OpgIAAX5cBAMgjnwfA9PR0bdu2TWlpab4uxe3nsmVz3Hbz5s1erAQoXAIDAxUbG0sIBIAizucB0OVyKS0tTf7+/vL393k5kiT/XIxGBgUFebESoPDIzMxUWlpaoRqtBwBcmsKRuCT5+/sXmlEFv1z8B1dYagYKQmZmpq9LAADkAyaBAAAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWKTRfBI3sfT9qVK7axz3xhJcqAQAAxQUjgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCsUkkF1Tpsjf5ZKfy5Wj9kx0AAAAuHSMAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWKRSTQAD4Tk7vNpPldKp6YqKXqwEAFARGAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsUyRnAed01uJp3DoOAADgb4wAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZfx9XQAAIH9kOZ0yOWybmprq1VoAnJ/T6VRAQIBPayAAAkAxkJ6erl/KlFGWM2cndjZv3uzligCcT2BgoGJjY30aAgmABez7UaN8XQKAYsjlcinL6ZTTGDnNxccBg4KCCqAqAGfLzMxUWlqaXC6XT+sgAAJAMeI0Rn45+I/F16efAJtlZmb6ugQmgQAAANiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAluGLoIF8kJs7vMQ98YQXK+FuMwCAi2MEEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyzAIGssFMWgBAccYIIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZKyaBcEE/AADA3xgBBAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMlbcCg75I7e31It74gkvVQIAAPKCEUAAAADLEAABAAAsQwAEAACwDAEQAADAMkwCgRVyO4EFAIDijBFAAAAAyzACCADId3xtFFC4MQIIAABgGQIgAACAZQiAAAAAluEaQKCAcW0UCgM+h4DdGAEEAACwDAEQAADAMgRAAAAAyxAAAQAALMMkkDziFmMAULCYwALkHSOAAAAAliEAAgAAWIYACAAAYBmuAQQAAEWON6/Bt+G6UQIgvIYLtQEAKJw4BQwAAGAZAiAAAIBlCIAAAACW4RpAAACAPMjNNe9ZTqeqJyZ6sZqcYQQQAADAMowAFjO5+SuEWbcAANiJEUAAAADLMAIIACjW+E5S32C/F26MAAIAAFiGAAgAAGAZTgGjSPLmPSABACjuCIAAgIvijy6geOEUMAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGX8fV0AAABF2fejRuW4bdwTT3ixEiDnGAEEAACwDAEQAADAMgRAAAAAy3ANIADA53JzHZ1NcrtfuMYQOcUIIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZJoEAAACcwYZJSYwAAgAAWIYACAAAYBlOAaPQsGHIHQCAwoAACABAASnKf+gW5dpxLk4BAwAAWIYACAAAYBkCIAAAgGUIgAAAAJZhEojFuKAXAAA7MQIIAABgGQIgAACAZQiAAAAAluEaQAAALFWYrgUvTLXYgAAIFHJ0igCA/MYpYAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALCMv68LAAAA+eP7UaN8XQKKCEYAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsUmkkgLodDcpJHgcLK5XD4ugQAQD7xeQB0Op3yc7mU5XTyHwxQyAUGBsrJH2oAUOT5PAAGBASo4rFjMr4uBMBFxcbGKiAgwNdlAADyyOcBUJL8XC5flwAgBwh/AFA8cC4HAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAy/j7ugAAQP5xORySk7/t8yI9PT1X7bPY38gFl8Ph6xIkEQABoFhwOp3yc7mU5XQWmv9giqrU1NRctc8kACKXAgMD5fTx54YACADFQEBAgCoeOybj60KKgXr16uWq/fHff/dSJSiuYmNjFRAQ4NMaCIAAUEz4uV
y+LqFYCAoKylV7f/Y7csnX4U9iEggAAIB1GAEEAOAMK9u393UJgNcxAggAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZfzzc2VZWVnKyMjI9esc5cvnZxlA8WKMzIkTUnq6rysBABQT+RIAjTE6dOiQjh49ekmvL52QkB9lAMWWycxU2oYNyvjyS1+XAgAoBvIlAJ4Of5UqVVJwcLAcDkeuXn/SyZlo4HyMpLSsLP0SGChJhEAAQJ7lOQBmZWW5w1/5SzyVm+Hnl9cygGIt0M9PKldOPzdqpIykJE4HAwDyJM9Db6ev+QsODs5zMQDOL9DPTw5/fzlKl/Z1KQCAIi7fzr3m9rQvgNxx/4bxuwYAyCMuvgMAALAMATAbvQYNUtfevX1dBgAAgFfk6/cAnm1l+/Y5apeVlqYmL73kzVIKtdGTJunjZcu0auHCC7bbunOnRr34ojZ+/72Sf/pJYx5/XIn33VdAVQIAgOKi2IwAplswK/KPU6dUvUoVjRg4UOEVK/q6HAAAUEQV2QB4a7duGvDUU3rsmWdUvWlTdbj/fknSS6+/rqvbtlVE/fqq06qV+g0frhMnT7pfN3vePFW58kr978sv1SQ+XpENGqjD/ffr0OHD593W+k2bVKNpU70wbVq2y9PT0zXgqacUc801qhgXp7jrrtP4qVPdy4+mpKjP44+rRtOmqtywoW675x5t3rrVXc/YyZO1eds2hcbEKDQmRrPnzct2O43r19czgwfrzttuU2BAQK73GQAAgOTlU8DeNueDD9Sja1ctmzvX/ZzD4dBzQ4eqWnS0fti/X/1HjNDQ557TC0895W7zR2qqJr/2ml4dN05Op1MPDBigJ8aO1YwJE87Zxhdr1uhfiYkaOWiQ7vvnP7OtY+qbb+qTzz7TrIkTFR0VpZ8OHtSPBw+6lyf85z8KCgrSvBkzFBoSojfmzlW7hAR9s3SpOrZtqy07d+p/K1fqo1mzJEmhISH5tYsAAADOUaQDYM1q1fT0Y495PHfmNXHVoqM19JFH9Mjw4R4BMCMjQy+MHKnLqlWTJPW85x49m801iAuXLtWDgwZp8qhR6tS27Xnr2H/ggGpWr67mTZrI4XCoauXK7mVr1q3T+k2btHvtWgX+dSeHUYMHa9GyZVqwZInu++c/VSo4WP5+fpzWBQAABaJIB8CGdeue89zyVas0Ydo07dizR8dPnFBmZqZS09L0x6lTCi5ZUpIUXLKkO/xJUkTFivrlyBGP9az79lstWb5cb02erNtuuumCdXTr1Entu3fXlTffrNatWumWG27Qja1aSZI2b9umE3/8oepNm3q85lRqqvYmJ1/S+wYAAMiLIh0ATwe60/b9+KPu6tlTPe6+W0P79VPZsDCtXbdOiY8/rvSMDHf7Ev6eb9vhcMgY4/FcjapVVS4sTG+9/77ir79eJUqUOG8dDePitPnzz7Vs5UqtWL1a3R9+WNdfc43eeuklnTx5UhEVK+rj2bPPeV0Yp3oBAIAPFOkAeLaN330nlzEaPWSInM4/57d8sHjxJa2rfNmymv3yy7q1WzclPPywZk2ceMEQGBoSok5t26pT27Zqf8st6nj//frt6FE1iIvTz7/+Kn8/P1WLjs72tQElSijL5bqkOgEAAHKryM4Czs5l1aopIyNDU998U3uTkzXnww/1+pw5l7y+iuXLa9Fbb2nH7t26v18/ZWZmZtvupddf13sLF2rH7t3auXevPvzkE4VXrKiw0FDd0KKFmjZqpLt799ZnX36pfT/+qKRvvtHICRP0zebNkqSqlStr348/atOWLTry229KS0vLdjvp6enatGWLNm3ZovSMDB38+Wdt2rJFu/ftu+T3CAAA7OPVEcBrFyzIUbvju3bly/bq1amj0Y8/rhenT9dT48frmquu0vCBA/Xgo49e8jrDK1bUorfe0q3/+pf+PWCAZkyYID8/P482pUuV0sTp07V73z75OZ26sn59vT99unsU8v3p0zVywgQ9NGSIfv3tN4VXqKBrrrpKlSpUkCS1j4/XwqVLdds99+hoSoqmjB2rbp06nVPLwcOH1fKML9eeNGOGJs2YoZZNm2pxNqeYAQAAsuMwZ1/8lo2UlBSVKVNGx44dU2hoqMey1NRU7d27VzVq1FBQUNAlFZFfARAoztKzspR86JBOzJolc9akpYKS0z/qLsWF+pnioCDeX07vvgTAt7zVl+amnylWp4ABAABwcQRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMAbCI+DIpSaExMTqaklLg2x49aZJatGtX4NvNqezqGz1pkmpefbVCY2K0aNmyAq1n348/KjQmRpu2bCnQ7QIAkFNeDYAZGRlKTU296E9aeroyznOf3aLMl6HNJn179NBHb77pfrx91y6NnTxZE59+WjtXr9ZN115boPVER0Zq5+rVuuLyyyXxOQAAFD5euxdwRkaGtm/frlOnTl20bdovvygwIEA1o6NVwt+rtyeGj2RkZKhEiRJeWXfpUqVUulQp9+M9ycmSpLatW8vhcFzyei+1Zj8/P4VXrHjJ2wUAwNu8NgKYlZWlU6dOyd/fX0FBQRf88fPzU1p6ulwuV47X73K5NH7qVNW74QZVqltX17Rrpw8/+USSZIzR7QkJuuO++3T6Vse/HT2q2JYt9cyLL0r6e1RmyfLlan7bbaoYF6d/3HmntuzY4bGdNevWKb5rV1WqW1d1WrXSoyNH6uQff7iXp6Wladhzz6lOq1aqcMUVanDjjXrzvfe078cf1fZf/5IkVW3cWKExMeo1aNBFaz/t0xUr1Oimm1Spbl21/de/lPzjjxfcH9mddjyakqLQmBh9mZTk8Z5XrF6t6zp0UHi9emp9113auWePx7omTJummldfraiGDZU4ZIjS0tLO2d6sd99Vk/h4VYyLU+P4eE2fP
fucWuZ9/LHa3H23KsbF6d2PPvJazWeeAh49aZK6PPigJKnM5ZcrNCbGvc/HTp6s2JYtVeGKK9SiXTstW7nyojX3GjRIXXv31rgpU1Tz6qtV5corNXbyZGVmZurJsWNVtUkTxbZsqf++/3627+t8n4O3P/hA1a666px927V3bz0wcOC5BxgAgHzk9WsA/f39FRAQcMEffz+/XK93/NSpmvPBB3ph5EglLV6sxO7d9cDAgfoqKUkOh0NTn31W32zerCmzZkmS+g0bpsjwcA3u08djPUOffVajBg/WivnzVaFcOXV58EFlZGRIkvbs26eOPXro9ptv1upFizRz4kStXb9eA596yv36BwcN0vuLFum5oUP19ZIlmvj00yoVHKzoyEj996WXJEnrly7VztWr9ezQoRetXZJ+PHhQ/0pMVJt//EOrPvpI93burOHjxuV+55/HyAkTNGrwYH0xf778/fz00JAh7mXzFy/WmEmTNHzAAH0xf77CK1bUa2eEO0l6Z8ECjZo4UcP699fXS5ZoeP/+eubFFzV7/nyPdiPGjVPvhAR9vWSJbmzVyms1n6lvjx6aMnasJGnn6tXauXq1JOmVWbP00uuv65nBg7Vm0SLd2KqV/tmrl3b98MNFa165Zo0OHj6sJW+/rdFDhmj0pEnq3LOnwsqU0efvv6/7u3bVw8OG6aeDB8+p53yfgw5t2siVlaXFn3/ubvvLkSP6dMUK3XPnnXnaVwAAXEyRPN+alpam8VOnasGsWWrWqJEkqUbVqlqzfr1enztXLZs1U1REhCY+/bQefPRRHf71Vy394gt9uWCB/M86xTz4P//RP1q2lCRN/Wskb+GyZep4662aMG2a7mrXTon33SdJqlW9up4bOlRtunXTCyNHav+BA5q/eLEWzJypG1q0cNdxWtmwMElSxfLlFRYamuPaZ7z9tmpUrarRf4WcmMsu05YdO/TCq6/my/4b1r+/WjZrJknq9+CD6vzAA0pNS1NQYKBemTlT93TurHs7d3a3XbF6tcdI1ehJkzRq8GDdHh8vSapepYq27dqlN+bOVbeOHd3teickuNt4s+YzlS5VSmX+2tdnnoadPGOGHunZU3fedpskaeSgQVqZlKRXZs7UhBEjLlhz2bAwPT90qJxOp2Iuu0wvvvaaTp06pYG9e0uSBvTqpRdefVVr1q93r/80Pz+/bD8HknRnu3b677x56tCmjaQ/g3V0ZKRa/fU+AQDwliIZAPckJ+uPU6d0R/fuHs+nZ2Sofp067scd2rTRwqVLNWHaNL3w1FOqVb36Oetq+lcIk6RyYWGKqVFD23fvliRt3rZN32/bpncXLnS3McbI5XJp3/79+n7HDvn5+all06b5Wvv23bvVpEGD89aZV3Vr13b/O+KvkPTLkSOqEhWlHbt36/6uXc/Z9pdr10qSTv7xh/YmJ6vP44+r75NPuttkZmYqNCTE43VX1qtXIDVfTMrx4zr488+6+sorPZ6/+sor9d22bR7PZVdzbK1acjr/HiyvVL686vw1wUP6M+SVCwvTL0eO5OzN/KX7XXfp+k6ddODQIUVFRGj2/Pnq1rFjnq5bBAAgJ4pkADxx8qQk6b1XX1VkRITHssCAAPe//zh1Shu//15+fn7avW9frrdz8uRJ3ffPf6pXQsI5y6pERmrPJawzp7Xn1umAcvqaR0nK/OtU9tn8z5jYcDps5PT6y9PXP0565hk1adjQY5mf0/OKguCSJQtFzbmRXc1nTwRxOBznTFZyOBy5rqdBXJzqxcZqzocf6h8tW2rrzp16b/r03BcNAEAuFckAGFurlgIDArT/4EH3acHsPDFmjJwOh+a99prufOABxV9/va5r3tyjzdcbN7pHkX4/dky7fvhBtWvWlPTnf9Dbd+9WzWrVsl3/FbVry+Vy6av/+z/3KeAzBfwVHLKysnJVe+2aNbX4s8/OqfNCKpQrJ0k69MsvOj12uGnr1gu+JjuX16ypdd9+q7s7dMh225UqVFBkeLh+2L9fXdq3z/X6z5RfNV9MaEiIIsPDtfabbzz2+dpvvlHj+vXzfXtny+5zcNq9nTvrlVmzdODnn3X9NdcoOjLS6/UAAFAkA2BI6dL6T48eGjJ6tFwul5o3bqyUEye0dv16hZQurW4dO2rJ8uV66/339b/33lPDuDg9/O9/q9egQVq9aJHKlinjXtezL72kcmFhqlShgkZOmKDyZcvqttatJUn9evbUjZ07a8BTTymhc2cFBwdr+65d+nzVKo0fPlzVoqN1d4cOShwyRM8NHaq6sbHaf+CAfjlyRB1vvVVVoqLkcDi0ZPlyxV93nYKCgnJU+/1du2ryjBl6cuxYJdx1lzZ89905EyzOVjIoSFc1bKgXpk1T9eho/XLkiJ5+4YVc79veCQnq/dhjurJuXTVr3FjvfvSRtu3cqepVqrjbPN63rwY9/bRCQ0LU+tprlZ6erg2bN+toSor63H9/jreVXzXnRN8ePTRm0iTVqFpV9evU0X/nzdPmrVv12vjxXtnembL7HJz+2prOt9+uJ599VrPeeUfTnn/e67UAACAVwCzgzMxMpaenX/AnM5uRkYsZ2q+fBj30kCZMm6ar2rRRx/vv16crVqh6dLR+PXJEfR5/XEP69lXDuDhJf4aWShUqqN+wYR7rGTFwoB575hlde8cd+vmXX/TOtGkK+OtUbN3YWC2ePVu79u7VLXffrVbt22vUxImKrFTJ/foXRo5U+1tuUf8RI9QkPl7/eeIJ/fHXadKoiAg93revRowbp5rNm7tnD1+odunPwPDWSy9p0f/+p2vatdPrc+ZoeP/+F90nr4wZo8ysLF3boYMGjxqlof365Xq/dmrbVoMSEzX0ued0XYcO2v/TT+px990ebRLuukuTR43S7Hnz1LxtW7Xp1k2z589Xtb/qz438qDkneickKPH++/XEmDG6+rbb9L+VKzV36tRsrwvNb+f7HEhSmZAQ3R4fr1KlSrn/8AAAwNsc5swLsM4jJSVFZcqU0bFjxxR6xixGSUpNTdXevXtVo0YNBQUFuZ8v7F8E/WVS0p/fr7d+vcfMTKCgtbv3XsXWqqXnz/rj5GzpWVlKPnRIJ2bNksnlhJP8cu2CBV5b94X6meKgIN7fyjxelgGgYHirL81NP+O1tFWiRAnVrl072+ueznZizx45nU7uAgKr/H7smL5KStKXSUkeX0UDAIC3eTVxlShRIke30srIw+xXoKhq1b69jh47ppGPPqqYyy7zdTkAAItYO+TWqlkzpezc6esyYLHvVqzwdQkAAEt5fRIIAAAAChcCIAAAgGXyLQDmYDIxgDxw/4bxuwYAyKM8B8DTkzxOf/cdAO9Iy8qSycyUOXHC16UAAIq4PE8C8fPzU1hYmA4fPixJCg4OzvXN7NMv4YugAVsY/Rn+fvntN6Vt2CClp/u6JABAEZcvs4AjIiIkyR0Ccyv1El8H2MJkZiptwwZlfPmlr0sBABQD+RIAHQ6HIiMjValSJWVkZOT69V9zD1Tg/Iz587QvI38AgHySr98D6OfnJz8/v1y/zle3tQIAALAR
XwMDAABgGQIgAACAZQiAAAAAlsnRNYCnv+Q5JSXFK0WcvISJIwAKnrf6gDPXXVy/VN7b/ahEXwoUFd7qB3LTj+YoAB4/flySVKVKlTyUBaDIK1PG65s4fvy4yhTAdgoa/SgANy/3cTnpRx0mBzHR5XLpwIEDCgkJyfWXPF9MSkqKqlSpov379ys0NDRf1w3f4bgWP94+psYYHT9+XFFRUXI6i9/VKd7sRyV+54ojjmnx5M3jmpt+NEcjgE6nU9HR0flS3PmEhobyAS+GOK7FjzePaXEc+TutIPpRid+54ohjWjx567jmtB8tfn9mAwAA4IIIgAAAAJbxeQAMDAzU8OHDFRgY6OtSkI84rsUPx7Rw4/gUPxzT4qmwHNccTQIBAABA8eHzEUAAAAAULAIgAACAZQiAAAAAliEAAgAAWMbnAfDll19W9erVFRQUpGbNmun//u//fF0SzmPEiBFyOBweP7Gxse7lqampSkxMVPny5VW6dGl16tRJP//8s8c6kpOT1bZtWwUHB6tSpUp69NFHlZmZWdBvxVorV65Uu3btFBUVJYfDoQ8//NBjuTFGw4YNU2RkpEqWLKnWrVtr586dHm1+++03devWTaGhoQoLC1OPHj104sQJjzabNm1Sq1atFBQUpCpVqui5557z9luzGv1o0UE/WjwUh77UpwHwnXfeUf/+/TV8+HB98803atCggeLj43X48GFfloULiIuL08GDB90/X331lXtZv379tHDhQr333nv64osvdODAAXXs2NG9PCsrS23btlV6erpWr16tWbNmaebMmRo2bJgv3oqVTp48qQYNGujll1/Odvlzzz2nSZMmaerUqUpKSlKpUqUUHx+v1NRUd5tu3brp+++/17Jly7Ro0SKtXLlSPXv2dC9PSUnRzTffrGrVqmn9+vV6/vnnNWLECL366qtef382oh8teuhHi75i0ZcaH2ratKlJTEx0P87KyjJRUVFmzJgxPqwK5zN8+HDToEGDbJcdPXrUlChRwrz33nvu57Zu3WokmTVr1hhjjFm8eLFxOp3m0KFD7jZTpkwxoaGhJi0tzau141ySzAcffOB+7HK5TEREhHn++efdzx09etQEBgaaOXPmGGOM2bJli5Fkvv76a3ebTz75xDgcDvPTTz8ZY4x55ZVXTNmyZT2O6WOPPWZq167t5XdkJ/rRooV+tPgpqn2pz0YA09PTtX79erVu3dr9nNPpVOvWrbVmzRpflYWL2Llzp6KionTZZZepW7duSk5OliStX79eGRkZHsczNjZWVatWdR/PNWvWqF69egoPD3e3iY+PV0pKir7//vuCfSM4x969e3Xo0CGPY1imTBk1a9bM4xiGhYWpSZMm7jatW7eW0+lUUlKSu821116rgIAAd5v4+Hht375dv//+ewG9GzvQjxZN9KPFW1HpS30WAH/99VdlZWV5fIglKTw8XIcOHfJRVbiQZs2aaebMmVqyZImmTJmivXv3qlWrVjp+/LgOHTqkgIAAhYWFebzmzON56NChbI/36WXwrdPH4EK/k4cOHVKlSpU8lvv7+6tcuXIcZx+gHy166EeLv6LSl/rneQ2wRps2bdz/rl+/vpo1a6Zq1arp3XffVcmSJX1YGQAUDfSjKCx8NgJYoUIF+fn5nTO76eeff1ZERISPqkJuhIWF6fLLL9euXbsUERGh9PR0HT161KPNmcczIiIi2+N9ehl86/QxuNDvZERExDmTCzIzM/Xbb79xnH2AfrToox8tfopKX+qzABgQEKDGjRvrs88+cz/ncrn02WefqXnz5r4qC7lw4sQJ7d69W5GRkWrcuLFKlCjhcTy3b9+u5ORk9/Fs3ry5Nm/e7PGhX7ZsmUJDQ3XFFVcUeP3wVKNGDUVERHgcw5SUFCUlJXkcw6NHj2r9+vXuNp9//rlcLpeaNWvmbrNy5UplZGS42yxbtky1a9dW2bJlC+jd2IF+tOijHy1+ikxfmi9TSS7R3LlzTWBgoJk5c6bZsmWL6dmzpwkLC/OY3YTCY8CAAWbFihVm7969ZtWqVaZ169amQoUK5vDhw8YYY3r16mWqVq1qPv/8c7Nu3TrTvHlz07x5c/frMzMzTd26dc3NN99sNm7caJYsWWIqVqxohgwZ4qu3ZJ3jx4+bDRs2mA0bNhhJZsKECWbDhg1m3759xhhjxo4da8LCwsyCBQvMpk2bTPv27U2NGjXMqVOn3Ou45ZZbTKNGjUxSUpL56quvTExMjOnatat7+dGjR014eLi55557zHfffWfmzp1rgoODzbRp0wr8/dqAfrRooR8tHopDX+rTAGiMMZMnTzZVq1Y1AQEBpmnTpmbt2rW+Lgnn0aVLFxMZGWkCAgJM5cqVTZcuXcyuXbvcy0+dOmUeeughU7ZsWRMcHGw6dOhgDh486LGOH374wbRp08aULFnSVKhQwQwYMMBkZGQU9Fux1vLly42kc34SEhKMMX9+fcHQoUNNeHi4CQwMNDfeeKPZvn27xzqOHDliunbtakqXLm1CQ0PNfffdZ44fP+7R5ttvvzUtW7Y0gYGBpnLlymbs2LEF9RatRD9adNCPFg/FoS91GGNM3scRAQAAUFT4/FZwAAAAKFgEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwC00IgRI9SwYUNfl5Fr1atX14svvpindcycOVNhYWHux0V1XwDwraLad9CP4jQCYBGwYsUKORyOc24QfqkGDhzocY9Cm3lrX6xcuVLt2rVTVFSUHA6HPvzww3zfBoCcox/1Hm/tizFjxuiqq65SSEiIKlWqpDvuuEPbt2/P9+3YigBoEWOMMjMzVbp0aZUvXz5P6zrz5tT50c5X8mNfZOfkyZNq0KCBXn755XxfNwDfoR89l7f60S+++EKJiYlau3atli1bpoyMDN188806efJkvm/LRgTAfHD99derT58+6tOnj8qUKaMKFSpo6NChOvMue7///rvuvfdelS1bVsHBwWrTpo127tzpXr5v3z61a9dOZcuWValSpRQXF6fFixfrhx9+0A033CBJKlu2rBwOh7p37y5JcrlcGjNmjGrUqKGSJUuqQYMGev/9993rPP0X7yeffKLGjRsrMDBQX3311TnD9S6XSyNHjlR0dLQCAwPVsGFDLVmyxL38hx9+kMPh0DvvvKPrrrtOQUFBmj17drb7wuFwaMqUKbr99ttVqlQpjRo1SllZWerRo4e7ztq1a2vixIker+vevbvuuOMOjRs3TpGRkSpfvrwSExMv2PG99tprCgsLu+BfnjNnzlTVqlUVHBysDh066MiRIx7Lz94Xp+sYPXq0wsPDFRYWppEjRyozM1OPPvqoypUrp+joaL3xxhvn3aYktWnTRs8884w6dOhwwXYA/kQ/+jf60T8tWbJE3bt3V1xcnBo0aKCZM2cqOTlZ69evv+DrkEP5dldhi1133XWmdOnS5uGHHzbbtm0z//3vf01wcLB59dVX3W1uv/12U6dOHbNy5UqzceNGEx8fb2rVqmXS09ONMca0bdvW3HTTTWbTpk1
m9+7dZuHCheaLL74wmZmZZt68eUaS2b59uzl48KA5evSoMcaYZ555xsTGxpolS5aY3bt3mzfeeMMEBgaaFStWGGP+vll1/fr1zdKlS82uXbvMkSNHzPDhw02DBg3ctU2YMMGEhoaaOXPmmG3btplBgwaZEiVKmB07dhhjjNm7d6+RZKpXr27mzZtn9uzZYw4cOJDtvpBkKlWqZF5//XWze/dus2/fPpOenm6GDRtmvv76a7Nnzx73/nnnnXfcr0tISDChoaGmV69eZuvWrWbhwoXn7MNq1aqZF154wRhjzLPPPmvKly9vkpKSzntc1q5da5xOp3n22WfN9u3bzcSJE01YWJgpU6aMu83Z+yIhIcGEhISYxMREs23bNjNjxgwjycTHx5tRo0aZHTt2mKefftqUKFHC7N+//wKfCs998sEHH+SoLWAr+tG/0Y9mb+fOnUaS2bx5c45fg/MjAOaD6667ztSpU8e4XC73c4899pipU6eOMcaYHTt2GElm1apV7uW//vqrKVmypHn33XeNMcbUq1fPjBgxItv1n+6Afv/9d/dzqampJjg42KxevdqjbY8ePUzXrl09Xvfhhx96tDn7lzUqKsqMGjXKo81VV11lHnroIWPM3x3Xiy++eNF9Ick88sgjF22XmJhoOnXq5H6ckJBgqlWrZjIzM93Pde7c2XTp0sX9+HTHNWjQIBMZGWm+++67C26ja9eu5tZbb/V4rkuXLhftuKpVq2aysrLcz9WuXdu0atXK/TgzM9OUKlXKzJkz56Lv0xgCIJAT9KN/ox89V1ZWlmnbtq1p0aJFjtrj4vwLcLCxWLv66qvlcDjcj5s3b67x48crKytLW7dulb+/v5o1a+ZeXr58edWuXVtbt26VJPXt21e9e/fW0qVL1bp1a3Xq1En169c/7/Z27dqlP/74QzfddJPH8+np6WrUqJHHc02aNDnvelJSUnTgwAG1aNHC4/kWLVro22+/zfF6Ltbu5Zdf1uuvv67k5GSdOnVK6enp58wai4uLk5+fn/txZGSkNm/e7NFm/PjxOnnypNatW6fLLrvsgnVs3br1nFOwzZs39zgtk524uDg5nX9fHREeHq66deu6H/v5+al8+fI6fPjwBdcDIHfoRy/czuZ+NDExUd99952++uqrHLXHxXENYCHx73//W3v27NE999yjzZs3q0mTJpo8efJ52584cUKS9PHHH2vjxo3uny1btnhcvyJJpUqVypcac7qes9vNnTtXAwcOVI8ePbR06VJt3LhR9913n9LT0z3alShRwuOxw+GQy+XyeK5Vq1bKysrSu+++ewnvIGeyqyMntQHwLfrR4tmP9unTR4sWLdLy5csVHR2dr3XajACYT5KSkjwer127VjExMfLz81OdOnWUmZnp0ebIkSPavn27rrjiCvdzVapUUa9evTR//nwNGDBA06dPlyQFBARIkrKystxtr7jiCgUGBio5OVm1atXy+KlSpUqO6w4NDVVUVJRWrVrl8fyqVas8asuLVatW6ZprrtFDDz2kRo0aqVatWtq9e/clratp06b65JNPNHr0aI0bN+6CbevUqZPtcQFQONGPnp+N/agxRn369NEHH3ygzz//XDVq1CiQ7dqCU8D5JDk5Wf3799eDDz6ob775RpMnT9b48eMlSTExMWrfvr0eeOABTZs2TSEhIRo8eLAqV66s9u3bS5IeeeQRtWnTRpdffrl+//13LV++XHXq1JEkVatWTQ6HQ4sWLdKtt96qkiVLKiQkRAMHDlS/fv3kcrnUsmVLHTt2TKtWrVJoaKgSEhJyXPujjz6q4cOHq2bNmmrYsKHeeOMNbdy48bwz1HIrJiZGb775pj799FPVqFFDb731lr7++utL/mW+5pprtHjxYrVp00b+/v565JFHsm3Xt29ftWjRQuPGjVP79u316aefXvS0RX45ceKEdu3a5X68d+9ebdy4UeXKlVPVqlULpAagqKEfPT8b+9HExES9/fbbWrBggUJCQnTo0CFJUpkyZVSyZMkCqaE4YwQwn9x77706deqUmjZtqsTERD388MPq2bOne/kbb7yhxo0b67bbblPz5s1ljNHixYvdQ+JZWVlKTExUnTp1dMstt+jyyy/XK6+8IkmqXLmynnrqKQ0ePFjh4eHq06ePJOnpp5/W0KFDNWbMGPfrPv7441x3CH379lX//v01YMAA1atXT0uWLNFHH32kmJiYfNk3Dz74oDp27KguXbqoWbNmOnLkiB566KE8rbNly5b6+OOP9eSTT573FM/VV1+t6dOna+LEiWrQoIGWLl2qJ598Mk/bzal169apUaNG7uuI+vfvr0aNGmnYsGEFsn2gKKIfPT8b+9EpU6bo2LFjuv766xUZGen+eeeddwpk+8Wdw5gzvmQJl+T6669Xw4YN83x7HQCwFf0oULAYAQQAALAMARAAAMAynAIGAACwDCOAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACW+X/Nq86ejfJ5pQAAAABJRU5ErkJggg==\n", "text/plain": [ "
" ] @@ -781,14 +842,14 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 17, "id": "60e3d581-8a7f-4133-8756-9750f0174c88", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "2b35bf4117bd405a9b8761edfdf46e5e", + "model_id": "", "version_major": 2, "version_minor": 0 }, @@ -802,7 +863,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "f8dd327c06d04e409f042f05b5d64baa", + "model_id": "", "version_major": 2, "version_minor": 0 }, @@ -815,7 +876,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUoAAAHACAYAAADEAiG7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAp30lEQVR4nO3deZxN9ePH8fed1VjGjMEszAzZk/BVJlv6fo0s2VUqv6RQNL5SlkhZYuyKRL9Skb6KSptEfCv8rCEiywhjyVrC2Ge5n98fNffrmslnmOXeb17Px2MeD/fec8/9nHPnvtx7ztxzHMYYIwDAn/Lx9AAAwNsRSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALPxyMpHT6dThw4dVrFgxORyO/B4TAOQ7Y4zOnDmjqKgo+fhc/T1jjkJ5+PBhRUdH58ngAMCbHDx4UGXLlr3qNDkKZbFixVwzDA4Ozv3IAMDDUlJSFB0d7erb1eQolJkft4ODgwklgL+UnGxOZGcOAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAIscHWatIKWmpsrpdHp6GAA8yMfHRwEBAZ4ehotXhTI1NVU7d+7UpUuXPD0UAB4UGBioqlWrek0svSqUTqdTly5dkp+fn/z8vGpoAApIenq6Ll265FWfLL2yRn5+fl7zPwmAgpeenu7pIbhhZw4AWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQACz9PDwBZbUtMvOb7VB8yJB9GAkDiHSUAWBFKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWPh5egCetC0x8ZrvU33IkHwYCQBvxjtKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWPh5egD469uWmHjN96k+ZIjXPQZuXLyjBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgIWfpweQnZ2TJsnX6fT0MIA8sS0x8ZrvU33IkHwYCa4X7ygBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABg4ZUH7r2RGGNkjHG7zi809Jrn4/TiAx0XxPJ48zrz5rF5o8zXw6VLl3I1H39/f/n6+ubFkAilpxhjlJaWJqfTKYfD4XZbmXvvveb5paWl5dXQ8lxBLI83rzNvHps3MsbI399fhw4dyvLauFYhISGKiIjI9XwIpYdkvhDCw8NVqFAhtyfy4i+/XPP8CpUqlWdjy2sFsTzevM68eWzeyOl0yhijQoUKycfn+rYOGmN0/vx5HT9+XJIUGRmZqzERSg8wxsjpdCo8PFwhISFZbndex8eFwMDAPBhZ/iiI5fHmdebNY/NGeRFKSQoKCpIkHT9+XKVLl87Vx3B25niAMUYOh0OFChXy9FCAv7TChQtLyv2mDELpQbndbgLg6vLqNUYoAcCCUCJP9OjRQ/fdd5+nhwHkC3bmeJm1998vk8PtKbWmT8/n0XivUaNGacGCBVq3bt1Vp9uxa5fGvPyyfvjxRx08dEiJzz+vXo89VkCjxF8F7yhvYKmpqZ4eQr67cOGCysXEaOjAgQq/gf/kBrlDKG8gd999t/r27av+/furbNmyat26tSRpypQpuu222xQWFqaKFSvqqaee0tmzZ133e/fddxUREaGlS5eqVq1aKlmypNq0aaMjR4786WNt2LBB0dHRmjhxYra3p6amauCwYaoWF6fIqlV1a8OGevmyd8inTp1Sr169FB0drdKlS6t58+basmWLazyJiYnasmWLgoKCFBQUpPc++ijbx/lbzZp6cfBgdWzdWgEBAde8zgCJj943nDlz5qhHjx765ptvXNf5+Pho0qRJKleunJKTk/XUU09pyJAhmjJlimua8+fPa/LkyXrrrbfk4+Ojxx57TIMHD9asWbOyPMayZcv0wAMPKDExUd26ddOFbIL6+jvvaNG//623p05V2TJldOjwYR26bLrOnTsrKChIn376qYoXL64333xTLVu21JYtW3Tvvfdq27ZtWrp0qRYuXChJCrhwIQ/XEuCOUN5gKlasqNGjR7td989//tP179jYWA0bNkx9+vRxC2VaWpqmTp2qm266SZLUs2dPjRkzJsv8P/vsM3Xv3l3Tp0+/6s6dQ4cPq0K5crrj9tvlcDgUXaaM67a169drw4YNOnDggOsPr8eOHasFCxbok08+Ubdu3VS0aFH5+fkpIiJCkrKNMZBXCOUNpnbt2lmu++abbzRhwgQlJSXpzJkzSk9P18WLF3X+/HnXH+wWLlzYFUlJioiIcH09LNOGDRu0aNEivffee2rTps1Vx/Fgx47q0KWL6jZpoiaNG+vuf/xD/2jUSJL0444dOnv2rMpcFk/p9+2Ne/fuva7
lBnKDUN5gMsOXaf/+/erQoYN69Oih4cOHq0SJElq9erV69uyp1NRU1/T+/v5u93M4HFmOelS+fHmVKFFCs2fPVosWLbLc53I1b7lFm5Yv17+XL9fyVav0WO/eatyggd6ZPl3nzp9XRESElixZkuV+xYsXv95FB64bobzBff/993I6nRo3bpzre7Xz58+/rnmFhYVp7ty5atasmTp37qw5c+ZcNZbBxYqpQ6tW6tCqldq0aKH7unbVyVOndGv16jp27Jj8/PwUGxub7X0DAgKUkZFxXeMErhV7vW9wFSpUUFpamqZPn67k5GS99957mjFjxnXPr3Tp0lq0aJF27dqlLl26KD09Pdvppr35puZ//rl27dmj3Xv36rMvv1R4qVIqHhysuxo2VFxcnO6//379+9//1v79+7VmzRoNGzZMGzdulCTFxMRo3759+uGHH/Trr7/+6bELU1NTtXX7dm3dvl1paWk6cuyYtm7frr379l33MuLGQyhvcLfeeqvGjRunSZMmqU6dOpo7d65GjhyZq3lGRERo0aJF2rZtm7p27ZrtO7+iRYvqlTfeUJO2bdWkXTsd/PlnzXv7bfn4+MjhcOjTTz9VgwYN9Pjjj6tGjRrq0qWLDhw4oNKlS0uS2rdvr6ZNm6p58+aKjo7W/AULsh3L0ePH1bhVKzVu1UpHjx/XqzNmqHGrVnpq8OBcLSNuLA5z5YambKSkpKh48eI6ffq0goOD820wFy9e1NatW7Vv2jT5eukRnqsPGZLreTidTqWlpSk2Njbbw2ldzx7coFweby8/FcTyePM68+axeaPMw6wFBQXl6jBr0u9NSU5OVvny5bMcretausY7SgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJReJi0tTZdSU3P082ffo8bvVq5dqxI33aTTKSkF/tijRo1SXFxcgT9uTmU3vlGjRik2NlZBQUH6/PPPC3Q8+/fvV1BQkH744YcCfdyc4uhBXiQtLU27d+9WyrFjOZo+MCBAFaKj5ef313kaV65dqzYPPaQjR44oJCTE08P5y+rbt6969erlurxz504lJiZq3rx5qlu3rkJDQwt0PGXLllVycrJKliwpSVqxYoVatmypkydPesXvwV/nFfYX4HQ6deHCBfn6+srP1/eq06ZnZOhSaqoynE6exL+otLS0qx6mLjeKFi2qokWLui5nHhC5devWcjgc1z3f6x2zr6+v62j1Ti88zgMfvb2Qn6+v/P38rvpjC2l2nE6nJkyYoKpVqyo0NFR169bVxx9/LEkyxqhly5Zq3bq164C8v/32mypUqKAXX3xR0u//ywcFBWnRokW6/fbbFRISojvvvFPbtm1ze5xVq1apSZMmCg0NVcWKFTVoxAidO3/edfulS5c0fOxY3dKggSKqVlWdv/9d786bpwM//6w2Dz0kSYqMjFRQUJB69OhhHXumpd9+q9v/8Q9FVaumNg89pAM//3zV9ZHdx71Tp04pKChIK1ascFvmb7/9Vg0aNFCJEiV01113adeuXW7zmjBhgmJjY1WqVCn17NlTFy9ezPJ4s+fNU1zTpoqsWlVx8fF66913Xbcd+PlnlbjpJn38xRdq9cADCgkJ0dy5c/NtzJd/9B41apQ6duwo6fcDOwcFBbnW+ejRo1WhQgUVL15ccXFxbgdTzhzLhx9+qKZNm7rGnHmO9/Hjxys2NlYREREaPXq00tPTNXjwYEVFRalChQqaPXt2tsu1f/9+tWzZUpIUGhoqh8Ohrl27avbs2QoLC8tySL127drp4YcfzvoE5yFCeQOZMGGC5syZo6lTp+r777/XP//5Tz322GP6v//7PzkcDs2YMUMbN27UtGnTJP1+Lp2oqCg999xzbvN57rnnNHbsWK1cuVKlSpVSx44dlfbHucj37t2rtm3bql27dlq/fr3effddrd2wQQOHDXPdv1f//pq/YIHGDh2qtUuW6KVRo1SkSBGViYzUO3+ciXHLli1KTk52ncXxamOXpJ8PH1aXXr3UrEkTLV+4UA/ff79eHD8+z9bdsGHDNHbsWK1atUp+fn564oknXLd99NFHSkxM1IgRI7Rq1SpFRETojTfecLv/h59+qrEvv6zn+/XT2qVL9Xz//hr98st6/4qDJL84frye6NpVmzdvVnx8fL6N+XJ9+/Z1jTc5OVnJycmSpFdffVVTpkzRmDFjtH79esXHx+vee+/V7t273e7/wgsvKCEhwW3My5cv1+HDh7V06VKNGzdOI0eOVIcOHRQaGqoVK1aoR48e6t27t37O5j+zsmXLas6cOZKkpKQkHTlyRFOmTNF9992njIwMt+2nx48f18KFC/VYPp+rnU9tN4hLly5p/PjxWrhwoe644w5Jv5+6YfXq1XrzzTfVqFEjlSlTRlOnTlX37t117NgxffXVV1q7dm2WbaDPPfecmjRpIkmaMWOGKlasqM8++0z33nuvJkyYoAceeMB1wrKKFStq7NChavXgg5o0apR+PnRIny5cqI9nz9ZdDRtKksrFxLjmHfrH9qhSpUq5tk3lZOwz58xRudhYjfrjMHiVbrpJ25OSNOX11/Nk/Y0YMUKN/jinT//+/dW+fXtdvHhRhQoV0quvvqquXbuqa9eukqThw4frm2++cXvnM3bKFI187jm1bt5ckhQbHa2k3bs16/339eAf7+Ykqeejj6p18+Z5cpi1q435ckWLFnWdYiPz468kTZ48Wf369dP9998vSUpMTNTy5cv16quvavLkya7pevfurXbt2rnNMzQ0VC+99JJ8fHxUuXJlvfTSSzp//rwGDhwoSRowYIAmTpyo1atXu+afydfX17WNtHTp0m7bKB966CHNnDnTdeK6f/3rX4qJidFdd911fSsphwjlDWLPnj06f/68WrVq5XZ9amqqatas6brcsWNHff7555o4caJeeeUVVaxYMcu8Lt9bWqJECVWuXFlJSUmSfn8n+OOPP7p9bDROp5xOp/YfPKjtSUny9fVVg2vYI5yTse/as0d1LlsOSbr9b3/L8WPY3HLLLa5/Z8bk+PHjiomJUVJSkmsTQaa4uDjXR+Fz588ref9+9Rk0SH0ve3eenp6u4GLF3O5Xq0aNAhmzTUpKio4cOaJ69eq5XV+vXj1t3brV7bq/ZbOeb775ZrdjSZYuXVrVq1d3Xfb19VWJEiX0yy+/5Gxh/tCjRw/dfvvtOnTokMqUKaNZs2apa9euudqumhOE8gZx9uxZSdInn3yiqKgot9sCAgJc/z5//rw2bdokX1/fLB+xcuLcuXPq1q2bEhISXNdd/ONsjWWjopS8f3++jf1aZb64Lj92deYmhCtdvoMiu/tdzblz5yRJk0ePVp1atdxu871iW3ORK07+5qkxX4siRYpkue7KTyEOhyPb6651x03t2rVVs2ZNzZ49W3fffbe2bdvmOrd7fiKUN4hq1aopMDBQBw8edH0cy86gQYPk4+Ojzz77TO3atVOLFi2yfKz57rvvXO9KTp48qZ9++klVqlSRJNWqVUs7d+5UhQoVXNNfuOzFf3OVKnI6nV
q1bp3ro/flMl/cl58+Iidjr1yhghZ9/bXbdRs2bfrT5ZR+/3gvSUePHnVdt2XLlqveJztVqlTR+vXr1blzZ9d13333nevfpUuVUmR4uPYdPKj7rviIeq3yasw2wcHBioyM1Jo1a9zW+Zo1a3Tbbbfl+eNdKfM/wOxOI9K9e3dNnjxZhw4dUnx8vKKjo/N9PITyBlGsWDH17dtXAwcOlNPpVP369XX69GmtWbNGwcHB+p//+R8tWrRI77zzjpYtW6batWvr6aefVvfu3bV+/Xq3v6sbM2aMwsLCVLp0aQ0fPlxhYWGu83j369dPjRs3Vt++ffXoo4+qSJEi+mH1ai1buVLjR4xQTNmyeqBDB/3z2Wc1dtgw3VKtmg4eOqRfTpxQ+3vuUXSZMnI4HFq0aJGaNWumoKCgHI390Yce0rS33tLQMWP0cKdO+mHr1iw7Sq4UFBSkunXrauLEiSpXrpx++eUXDR8+/JrXbUJCgh5//HH97W9/U7169TR37lzt2LFD5cuXd03zbN++GjxihIKLFVOTO+9UamqqNm3dqlOnTyuhe/ccP1ZejTknnn76aY0aNUrly5d3vYvbsmWLZs2alS+Pd7mYmBg5HA598cUXatmypYKCglx/zvTQQw+pf//+mjFjhtue8/zEXm8vlJ6RobT09Kv+pF/HqVqHDRumQYMGacKECapVq5batm2rxYsXu15wvXr10vPPP6/atWtL+n1vZnh4uGvHTKaRI0eqf//+ql+/vo4ePar58+e73gHUqFFDS5Ys0e7duxUfH6877rhDY15+WRF/nBRMkiaNGqU2LVpowNChimvaVH0HD9b5P/58KCoiQoP69tULL7yg2NhYPf3009axS1LZMmX0zrRp+nLJEt3ZsqVmvveenu/f37pOXn/9daWnp6t+/foaMGDAdUXnvvvu0+DBgzVkyBDVr19fBw4cyLLNskunTpoyZoze++gjNWzZUq0efFDvz5+v2Ot4N5QXY86JhIQE9enTR4MGDdJtt92mpUuX6qOPPsp2u3Vei4qK0vDhwzVo0CCFh4erd+/ertuKFy+ujh07qmjRoll2IuUXTi52jfLz5GK5+WZOQZyMasWKFWrWrNk1f2uGk4t579i8UU5OLtakSRNVr15dr7zyylXnlVcnF+Ojtxfx9/dXxYoVdc6yQT+Tr4/PX+rri4DNyZMntWzZMi1btkzT//ib24LAq8zL+Pv7KzAXe3KBv7LatWvr5MmTGjdunGsHYkEglMixO++8UxcuXPD0MHAD27dvn0cel505AGBBKD0oP/74F8B/5NVrjFB6gMPhkDEm2yPMAMg7mX92ltvD1bGN0gMcDod8fHxc33MtVKiQ23dVU6/jbyR9rjj0lDcpiOXx5nXmzWPzRpl/HpT5OrkexhidP39ex48fV0hISJavil4rQukh/v7+SktL07Fjx7J8oT/t9Olrn99lx3v0NgWxPN68zrx5bN7IGCNjjAICAnJ9sIuQkBC3IyJdL0LpIQ6HQwEBAa5fisvt++ija55fpZ4982poea4glseb15k3j80bpaWlKTU1VeXLl3f7Qsa18vf3z/U7yUyE0sMcDkeW/zXTT5685vlc70eUglAQy+PN68ybx+aNMl8PgYGBWb5N4ymE8ga2LTHR00MA/ivcuP9tAUAOEUoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACz8PD2AG8G2xMS/xGMUJG9dZ9WHDMmHkeSNglhn3rz8+Yl3lABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwMJhjDG2iVJSUlS8eHGdPn1awcHB+TaYixcvauvWrdo3bZp8nc58exwA3ivDx0flEhJUo0YNFSpUKN8e51q6xjtKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALDwy8lExhhJUkpKSr4O5uLFizp79qzOpqXJ54/HBHBjcTocOnv2rFJSUpSamppvj5PZM5OD1jhMDqb6+eefFR0dnfuRAYCXOXjwoMqWLXvVaXIUSqfTqcOHD6tYsWJyOBx5NsArpaSkKDo6WgcPHlRwcHC+PQ68F78DKKjfAWOMzpw5o6ioKPn4XH0rZI4+evv4+FiLm5eCg4N5kdzg+B1AQfwOFC9ePEfTsTMHACwIJQBYeFUoAwMDNWzYMAUGBnp6KPAQfgfgjb8DOdqZAwA3Mq96RwkA3ohQAoAFoQQAC0IJABZeE8pp06apXLlyKlSokOLi4vTdd995ekjII8OHD5fD4XD7qVq1quv2ixcvKiEhQWFhYSpatKg6duyoY8eOuc3jwIEDuueee1S4cGGVLl1aAwYMUHp6ekEvCnJoxYoVat26taKiouRwOPTpp5+63W6M0dChQxUZGamgoCDFx8frp59+cpvmt99+U+fOnRUcHKyQkBB169ZNZ8+edZtmy5YtatSokQoVKqTo6GiNHz8+X5bHK0I5b948PfPMMxo2bJi+//571axZU82aNdPx48c9PTTkkerVq+vIkSOun5UrV7pue/rpp7VgwQJ9+OGHWr58uQ4fPqwOHTq4bs/IyNA999yj1NRUrV69Wu+8845mzZqloUOHemJRkAPnzp1TzZo1NW3atGxvHz9+vF555RX97//+r9atW6ciRYqoWbNmunjxomuazp07a9u2bVq6dKm++OILrVixQo8//rjr9pSUFN19992KjY3Vxo0bNWHCBA0fPlxvvPFG3i+Q8QJ169Y1CQkJrssZGRkmKirKjBkzxoOjQl4ZNmyYqVmzZra3nTp1yvj7+5sPP
/zQdd2OHTuMJLNmzRpjjDFffvml8fHxMUePHnVN89prr5ng4GBz6dKlfB07ck+S+eSTT1yXnU6niYiIMBMmTHBdd+rUKRMYGGjef/99Y4wx27dvN5LM+vXrXdMsWrTIOBwOc+jQIWOMMdOnTzehoaFuvwPPPvusqVKlSp4vg8ffUaampmrjxo2Kj493Xefj46P4+HitWbPGgyNDXvrpp58UFRWlm266SZ07d9aBAwckSRs3blRaWprb81+1alXFxMS4nv81a9aoRo0aCg8Pd03TrFkzpaSkaNu2bQW7IMi15ORkHT161O05L168uOLi4tye85CQEN12222uaeLj4+Xj46N169a5prnzzjsVEBDgmqZZs2ZKSkrSyZMn83TMHg/lr7/+qoyMDLcXgSSFh4fr6NGjHhoV8lJcXJxmzZqlxYsX67XXXlNycrIaNWqkM2fO6OjRowoICFBISIjbfS5//o8ePZrt70fmbfjvkvmcXe01f/ToUZUuXdrtdj8/P5UoUcIjvxc5OnoQkBstWrRw/fvWW29VXFycYmNj9cEHHygoKMiDIwNyxuPvKEuWLClfX98sezmPHTumiIgID40K+SkkJESVK1fW7t27FRERodTUVJ06dcptmsuf/4iIiGx/PzJvw3+XzOfsaq/5iIiILDtz09PT9dtvv3nk98LjoQwICFCdOnX09ddfu65zOp36+uuvVa9ePQ+ODPnl7Nmz2rNnjyIjI1WnTh35+/u7Pf9JSUk6cOCA6/mvV6+etm7d6vbCWbp0qYKDg3XzzTcX+PiRO+XLl1dERITbc56SkqJ169a5PeenTp3Sxo0bXdN88803cjqdiouLc02zYsUKpaWluaZZunSpqlSpotDQ0LwddJ7vHroOc+fONYGBgWbWrFlm+/bt5vHHHzchISFueznx36tfv35m2bJlJjk52axatcrEx8ebkiVLmuPHjxtjjOnZs6eJiYkx33zzjdmwYYOpV6+eqVevnuv+6enp5pZbbjF333232bx5s1m8eLEpVaqUGTx4sKcWCRZnzpwxmzZtMps2bTKSzEsvvWQ2bdpk9u/fb4wxZuzYsSYkJMR89tlnZsuWLaZt27amfPny5sKFC655NG/e3NSuXdusW7fOrFy50lSqVMk8+OCDrttPnTplwsPDzcMPP2x+/PFHM3fuXFO4cGHz+uuv5/nyeEUojTFm6tSpJiYmxgQEBJi6deuatWvXenpIyCOdOnUykZGRJiAgwJQpU8Z06tTJ7N6923X7hQsXzJNPPmlCQ0NN4cKFTfv27c2RI0fc5rFv3z7TokULExQUZEqWLGn69etn0tLSCnpRkEPffvutkZTl55FHHjHG/P4nQi+88IIJDw83gYGBpkmTJiYpKcltHidOnDAPPvigKVq0qAkODjaPPvqoOXPmjNs0P/zwg2nYsKEJDAw0ZcqUMWPHjs2X5eEwawBg4fFtlADg7QglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCiRwbPny4atWq5elhXLNy5cpp8uTJuZrHrFmz3I5w9N+6LnB9COVf2LJly+RwOLIccOJ69e/f3+37uTey/FoXtlMowDMIJayMMUpPT1fRokUVFhaWq3ldfgCDvJjOU/JiXWTHdgoFeAah9KC77rpLvXv3Vu/evVW8eHGVLFlSL7zwgi7/VunJkyfVpUsXhYaGqnDhwmrRooXbSZj279+v1q1bKzQ0VEWKFFH16tX15Zdfat++ffr73/8uSQoNDZXD4VDXrl0l/X50pjFjxqh8+fIKCgpSzZo19dFHH7nmmflOdNGiRapTp44CAwO1cuXKLB83nU6nXnzxRZUtW1aBgYGqVauWFi9e7Lp93759cjgcmjdvnho3bqxChQppzpw52a4Lh8Oh1157TW3atFGRIkWUmJiojIwMdevWzTXOKlWqaMqUKW7369q1q9q1a6eJEycqMjJSYWFhSkhIuGpo33zzTYWEhFz1HeGsWbMUExOjwoULq3379jpx4oTb7Veui8xxjB49WuHh4QoJCdGLL76o9PR0DRgwQCVKlFDZsmU1c+bMP31M6fdjd44aNUrt27e/6nQoYPnyDXLkSOPGjU3RokXNU089ZXbu3Gn+9a9/mcKFC5s33njDNU2bNm1MtWrVzIoVK8zmzZtNs2bNTMWKFU1qaqoxxph77rnHNG3a1GzZssXs2bPHLFiwwCxfvtykp6eb+fPnG0kmKSnJHDlyxJw6dcoYY8yoUaNM1apVzeLFi82ePXvMzJkzTWBgoFm2bJkx5j8HNLj11lvNkiVLzO7du82JEyeynPvmpZdeMsHBweb99983O3fuNAMHDjT+/v5m165dxhhjkpOTjSRTrlw5M3/+fLN3715z+PDhbNeFJFO6dGnz9ttvmz179pj9+/eb1NRUM3ToULN+/Xqzd+9e1/qZN2+e636PPPKICQ4ONj179jQ7duwwCxYsyLIOY2Njzcsvv2yMMWbcuHEmLCzMrFu37k+fl7Vr1xofHx8zbtw4k5SUZKZMmWJCQkJM8eLFXdNcuS4eeeQRU6xYMZOQkGB27txp3nrrLSPJNGvWzCQmJppdu3aZkSNHGn9/f3Pw4MGr/Fa4r5PLzzUDzyGUHtS4cWNTrVo143Q6Xdc9++yzplq1asYYY3bt2mUkmVWrVrlu//XXX01QUJD54IMPjDHG1KhRwwwfPjzb+WcG7+TJk67rLl68aAoXLmxWr17tNm23bt1ch7DKvN+nn37qNs2VcYiKijKJiYlu09x+++3mySefNMb8J5STJ0+2rgtJpm/fvtbpEhISTMeOHV2XH3nkERMbG2vS09Nd1913332mU6dOrsuZoRw4cKCJjIw0P/7441Uf48EHHzQtW7Z0u65Tp07WUMbGxpqMjAzXdVWqVDGNGjVyXU5PTzdFihRxnUDLhlB6D04F4WF33HGHHA6H63K9evU0adIkZWRkaMeOHfLz83MdqFSSwsLCVKVKFe3YsUOS1KdPH/Xq1UtLlixRfHy8OnbsqFtvvfVPH2/37t06f/68mjZt6nZ9amqqateu7Xbd5Sd2ulJKSooOHz6sBg0auF3foEED/fDDDzmej226adOm6e2339aBAwd04cIFpaamZtnbXL16dfn6+rouR0ZGauvWrW7TTJo0SefOndOGDRt00003XXUcO3bsyPLRt169em6bFbJTvXp1+fj8Z2tWeHi4brnlFtdlX19fhYWFcRrm/0Jso/wv1717d+3du1cPP/ywtm7dqttuu01Tp0790+kzTyC/cOFCbd682fWzfft2t+2UklSkSJE8GWNO53PldHPnzlX//v3VrVs3LVmyRJs3b9ajjz6q1NRUt+n8/f3dLjscDjmdTrfrGjVqpIyMDH3wwQfXsQQ5k904cjI2eD9C6WGZp97MtHbtWlWqVEm+vr6qVq2a0tPT3aY5ceKEkpKS3E6BEB0drZ49e+rjjz9Wv379NGPGDElyncYzIyPDNe3NN9+swMBAHThwQBUrVnT7iY6OzvG4g4ODFRUVpVWrVrldv2rVqjw7PcOqVatUv359Pfnk
k6pdu7YqVqyoPXv2XNe86tatq0WLFmn06NGaOHHiVaetVq1ats8Lblx89PawAwcO6JlnntETTzyh77//XlOnTtWkSZMkSZUqVVLbtm3Vo0cPvf766ypWrJgGDRqkMmXKqG3btpKkvn37qkWLFqpcubJOnjypb7/9VtWqVZMkxcbGyuFw6IsvvlDLli0VFBSkYsWKqX///nr66afldDrVsGFDnT59WqtWrVJwcLAeeeSRHI99wIABGjZsmCpUqKBatWpp5syZ2rx585/u2b5WlSpV0uzZs/XVV1+pfPnyevfdd7V+/XqVL1/+uuZXv359ffnll2rRooX8/PzUt2/fbKfr06ePGjRooIkTJ6pt27b66quvrB+788rZs2e1e/du1+Xk5GRt3rxZJUqUUExMTIGMAVnxjtLDunTpogsXLqhu3bpKSEjQU089pccff9x1+8yZM1WnTh21atVK9erVkzFGX375pesjXUZGhhISElStWjU1b95clStX1vTp0yVJZcqU0YgRIzRo0CCFh4erd+/ekqSRI0fqhRde0JgxY1z3W7hw4TUHqE+fPnrmmWfUr18/1ahRQ4sXL9bnn3+uSpUq5cm6eeKJJ9ShQwd16tRJcXFxOnHihJ588slczbNhw4ZauHChnn/++T/dRHHHHXdoxowZmjJlimrWrKklS5bo+eefz9Xj5tSGDRtUu3Zt1/biZ555RrVr19bQoUML5PGRPU4F4UF33XWXatWqleuv1wHIX7yjBAALQgkAFnz0BgAL3lECgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgMX/AynOsCF3qirTAAAAAElFTkSuQmCC\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUoAAAHACAYAAADEAiG7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAq80lEQVR4nO3deZxN9ePH8fedGbNYZkFmYYyxCzFfMpa0jrVFkq9KhSRlJNnSV5ZiLFkilUrfSAtK0Vf2XyXfhC9qkDUaSyElY8gyc+d+fn9kbm4mn5kxM3fi9Xw8PB7m3nPP+Zxz733NuffOPcdhjDECAPwlH28PAACKOkIJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoCFX04mcrlcOnjwoEqVKiWHw1HQYwKAAmeM0YkTJxQVFSUfn4vvM+YolAcPHlR0dHS+DA4AipIDBw6oQoUKF50mR6EsVaqUe4bBwcGXPjIA8LK0tDRFR0e7+3YxOQpl1svt4OBgQgngspKTtxP5MAcALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKARY4Os1aY0tPT5XK5vD0MAF7k4+Mjf39/bw/DrUiFMj09XTt27NDZs2e9PRQAXhQQEKCaNWsWmVgWqVC6XC6dPXtWfn5+8vMrUkMDUEicTqfOnj1bpF5ZFska+fn5FZnfJAAKn9Pp9PYQPPBhDgBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALP28PAIVra1JSrqavPWRIAY0E+PtgjxIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFj4eXsAQGHYmpSU69vUHjKkAEbiqaiOC57YowQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoCFn7cHgD9sTUrK1fS1hwwpoJH8IbdjkgpnXEBhYo8SACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFhy418uMMTLGSJL8wsJydVuXy5Xr5eV2GXmRl3EVtLysd2GsR1EdlzdlPR/Onj17SfMpVqyYfH1982NIcpisUV1EWlqaQkJCdPz4cQUHB+fLgrNz5swZbdmyRYGBgfL39y+w5RQFxhhlZGTI5XLJ4XBIkjKOH8/VPIqFhOR6ubldRl7kZVwFLS/rXRjrUVTH5U1ZOw/+/v7u50ZehYaGKiIiItv55KZr7FF6SUZGhiQpPDxcgYGBcjgcOvPzz7maR+BVV+V6ubldRl7kZVwFLS/rXRjrUVTH5U0ul0vGGAUGBsrHJ2/vDhpjdOrUKR05ckSSFBkZeUljIpReYIyRy+VSeHi4QkND3Ze7cvkyISAgINfLzu0y8iIv4ypoeVnvwliPojoub8qPUEpSUFCQJOnIkSMqV67cJb0M58McLzDGyOFwKDAw0NtDAS5rxYsXl/THK7i8IpRedKnvvwC4uPx6jhFKALAglMgXiQMH6v6ePb09DKBA8GFOEZPcq1eOp20yf34BjqRoGzVqlBYuXKh169ZddLpt27bpueee09cbNujAjz8q6Zln9NhDDxXSKHG5YI/yCpaenu7tIRS4U6dOKTY2VsMGDVL4Zf5nNSg4hPIK0rJlSw0aPlxPP/ecqjZooLu7dpUkvfzGG2rWurUq1K6tOs2aacDQoTr522/u2703b54q1aunT1etUnyLFoquU0d3d+2qw+f+Ri07GzZsUHR0tCZMmJDt9enp6erbt69iY2MVGhqq6tWra/z48e7rU1NT9dhjjyk6OlrlypVT69attXnzZknS22+/raSkJG3evFlBQUEKCgrS22+/ne1yGjZsqDFjxqjD7bdf9l9iQMHhpfcVZs5HH6lb585a8sEH7st8fHw0dvhwxURHa+/+/Ro4bJhGjB2rCSNHuqc5feaMXpo+Xa9OnCgfHx/17NdPw0aP1uuTJ1+wjJUrV+qee+5RUlKSunfvnu04Xn75ZS1atEhvv/22oqOj9cMPP+iHH35wX9+5c2cFBQVpwYIFCgkJ0RtvvKG2bdtq8+bNuvvuu7V161
atWLFCixYtkiSFXObfVoF3EcorTOVKlfTs4MEel53/nl3FChX0r3791H/oUI9QZmRkaNKoUYqNiZEk9XjgAY2fOvWC+X+ybJl6DRyoV155RR07dvzLcRw4cEBVq1ZVs2bN5HA4FHNuvpK0evVqbdiwQfv373f/cfXYsWO1cOFCzZ8/X927d1fJkiXl5+eniIiIvG0IIBcI5RWmXp06F1y28ssvNXnaNH33/fc6cfKknE6nzpw9q1OnT6v4uW83FA8KckdSksLLldPPR496zGdjcrKWffaZ3nvvPd1xxx0XHccDDzyg2267Tddcc41atGihtm3bKiEhQZK0ZcsWnTx5UuXLl/e4zenTp/X999/nab2BS0EorzBZ4cuy/4cfdO/DD6tb584aMmCAwkJCtHbDBvUZPPj3bzOcm97Pz/Oh4nA49OfjqVSqWFFhYWGaNWuW2rRpo2LFiv3lOOLi4rR9+3YtW7ZMn3/+ue6//37ddNNNmj17tk6ePKmIiAgtX778gtvxEhveQCivcMlbtshljEYNGeL+Xu2CxYvzNK8ypUtr1rRpavfgg+rcubPefffdi8YyODhYHTt2VMeOHdW+fXvdcccd+vXXXxUXF6effvpJfn5+Hi/Jz+fv76/MzMw8jRPILT71vsJVrlRJGRkZev2tt7R3/37NnT9fM957L8/zu6psWS1ZskS7du3Sgw8+KKfTme10U6ZM0dy5c7Vz50599913+uijjxQREaHQ0FDdfPPNio+P1z//+U/93//9n/bt26c1a9Zo+PDh2rhxoySpYsWK2rt3rzZt2qRffvnlL49dmJ6erk2bNmnLtm3KyMjQoZ9+0pZt2/T93r15XkdceYrcHuXuadPk53LJN4cHJ609ZEgBj0jampSU69sUxrjyQ51atTRqyBC9+NprGjl+vJo0aqRhAwfqsf798zzPiIgILVmyRK1atVLXrl311ltvXXDkllKlSumFF17Q7t275evrqwYNGmj+/Pl/7NUuWKDhw4frkUce0S+//KLw8HBdd911KleunCSpffv2+vjjj9W6dWulpqbq9ddf1wMPPHDBWA4dOqTGjRu7f35p+nS9NH26msXHa+Hs2Xlex79y+tChfJ8nvK/IHbh3fq9el30oXS6XMjIyFBMT43HIrNw+yYLycIy9wngi52VcBS0v68329Y6sw6wFBQVd0mHWpN+bkpKSotjY2AuO1pWbrvHSGwAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCGUR43Q6dTY9PUf/LvVcxZe7VatWKSgoSKmpqYW+7LGTJ+v6W28t9OXm1KhRoxQfH3/BZTExMQoKCtJ//vOfQh3Pvn37FBQUpE2bNhXqcnOqyH3X+0rmdDq158ABnc3huWyCT51S1apVL3qEnr+bVatWqVWrVjp06JBCQ0O9PZzLVt++ffXYY4+5f96xY4eSkpI0d+5cNWrUSGFhYYU6ngoVKiglJUVly5aV9PvjoG3btjp27FiReBwQyiIk0+XS2fR0+fr6yu9PB5H4M2dmpk6fPi1XDr8Tj7+fjIyMAvslWLJkSZUsWdL9c9YBkW+//XY5HI48zzevY/b19XUfrb4oPqZ56V0E+fn6qpif30X/2UKaHZfLpRdeeUX1r79eUbVqqXnbtvr43LEnjTFqf//96tCli/uAvMdSU1W7aVONfuEFSdKXa9eqdOXKWv7ZZ7quTRtF1qypFnfdpW07d3osZ/Xq1brlllsUFhamqlWrql+/fvrtvJOVnT17VkOGDFHVqlUVEhKi2rVra+bMmdq3b59atWolSYqMjFRQUJB69OjhHvv48eNVs2ZNhYWFqVGjRvroo488lrt06VLVrVtXYWFhatWqlfbt23fR7bH/hx9UunJlbdm2zX3Z8bQ0la5cWV+uXSvpj5fvn3/+uZo1a6bSpUvrxhtv1K5duzzmNX78eMXExKhi3bp6/Kmnsj3s26y5cxXfooUia9ZUfEKC/n3eCdGyxvLRJ5/otnvuUWTNmvrg44+zHfOfX6KmpqYqKChIq1atyvGYz3/pPWrUKHXo0EGSVLx4cQWdO1izy+XS6NGjVaVKFYWEhCg+Pt7jYMpZL5c/+OADtWjRQqGhoZozZ4569Oihjh076vnnn1dMTIwiIiI0evRoOZ1OPf3004qKilKVKlU0a9asC+a1adMm7du3T23btpUkhYWFyeFwqGvXrpo1a5bKlClzwba98847sz1yVH4ilFeQ8ePHa878+Zo4apS+WrZMjz30kB7t10+r162Tw+HQyxMm6JvNm/XazJmSpH7PPKPI8HANevxxj/kMGztWI//1L326YIHKli6t+3r0cL9fmrJvn9q1a6c777xT69ev19tvv601a9boySefdN++e/fuev/99zVx4kQlJyfrpZdeUokSJVShQgXNPnfos82bNyslJcV9Fsfx48fr3Xff1dSpU/X111/r8ccf10MPPaT//ve/kn4/B88999yjtm3bat26deratauGDh2ab9tu+PDhGjt2rFavXi0/Pz/17NnTfd28efOUlJSkZ599Vp9+/LEiypXTv9991+P2HyxYoLEvvKBn+vfX2hUr9MyAARr9wgua/eGHHtM99/zz6tm1q9auWKGbr7++wMZ8vr59++r111+XJKWkpCglJUWS9NJLL2nKlCkaM2aM1q9fr4SEBN19993avXu3x+2HDh2qxMREJScnu0/n8cUXX+jgwYNasWKFxo0bp5EjR+quu+5SWFiYVq1apR49eqh3794eJ5TLUqFCBb17bvvt3LlThw4d0pQpU9SxY0dlZmZ6vH965MgRLVq0SA8V8Lnaeel9hTh79qyef/55fTRrlhr94x+Sfj91w9oNGzTzvffULD5eURERmpSUpF79++vIzz/r/1au1MqFCy84DcSgPn10U/PmkqRXJkxQnaZN9cny5Wp/6616Ydo03XPPPXr8XFyrVq2qCRMmqGXLlnrxxRd14MABffjhh1q0aJFuvvlmSVJsbKx73qVLl5YkXXXVVe73prLGvmjRIvexJWNjY/XVV1/pjTfeUPPmzTV9+nRVrlxZ48aNkyRVr15dW7du1cSJE/Nl+z377LNqfm6dBwwYoPbt2+vMmTMKDAzUSy+9pK5du6pr1646feiQhvTvr5WrV3vs+YydMkUj//Uv3d66tSQpJjpaO3fv1szZs3Xvub05SXq0Wzf3NAU55vOVLFnSfYqN80/WNnnyZPXv31///Oc/JUlJSUn64osv9NJLL2nyeWff7N27t+68806PeYaFhWnSpEny8fFR9erVNWnSJJ06dUqDBg2SJA0cOFATJkzQV1995Z5/Fl9fX/d7pOXKlfN4j/K+++7TjBkz3Ceue+edd1SxYkXdeOONedtIOUQorxB79uzRqVOn1OHBBz0uT8/IUN2rr3b/fGfbtlq0bJkmv/qqJo4cqSrnRSxLo7g49//DQkNVtXJl7Tq3l7F1+3Zt3blTc+bMcU9jjJHL5dLevXv17bffytfX1/0Ezs3Yb7vtNs+xp6erXr16kn7/MOLaa6/1u
P7Pn+peijrnnZQtKyZHjhxRxYoVtXPnTvdbBFmujYtzv3z/7dQppezbpz6DB6vvv/7lnsbpdCq4VCmP29WvW7dQxmyTlpamQ4cOqUmTJh6XN2nSRFu2bPG47B/nfvGe7+qrr/Y4lmS5cuVUu3Zt98++vr4qXbq0fv7555ytzDk9evTQtddeqx9//FHly5fXzJkz1bVr10t6XzUnCOUV4uTJk5KkOf/+tyLDwz2u8/f3d///1OnT2nQuZnvycLqEk6dOqXv37kpMTLzguujoaO3Zsyf38zw39vnz5ysqKsrjuvPHnls+555c5x+7+q/+5Or8Dygc2dzuYrLen508erQa1K/vcd2fj/xeonjxIjHm3ChRosQFl2V3MrrsLsvtBzdxcXGqV6+eZs2apZYtW2rr1q3uc7sXJEJ5hahVq5YCAgL0w8GDanaRPa2hSUly+Pjo/TffVKfu3dXyppt0fdOmHtOsT05WhXOnkk09flx7UlJUvWpVSVK92rW1Y8cOValSJdv516lTRy6XS//973/dL73Pl/XkPv/EYVljP3DgwF/uidasWfOCJ8z//ve/v1xPSSpTpowk6acjR6Rzezvnf7CTUzVq1ND69evVuXNn92UbkpPd/y931VWKDA/X3gMH1PFPL1FzK2vMhw8fdl+2efPmS5pndoKDgxUZGak1a9Z4bPM1a9aoYcOG+b68P8v6BZjdCeQefvhhTZ48WT/++KMSEhIUHR1d4OMhlFeIUqVKqW/fvhoyapRcLpcaN2yotBMntG7jRpUqWVL3duig5Z99pnfnzdOyefNUr04dPd6jh3oNHKgvFy9W6HmniR0/dapKh4bqqrJllTRxokqHhenWFi0kSU/07KmWd9+tvn37qlu3bipRooS2b9+uTz/9VJMnT1ZMTIzuv/9+9ezZUxMnTtQ111yj/fv368iRI7r77rtVsWJFORwO9zl3goKC3GMfNGiQXC6XmjZtquPHj2vNmjUKDg7W/fffr4cfflhTpkzR008/rW7duunrr7/WO++8c9FtEhQYqIZxcZr86quqGB2tX44eVdKkSbnetomJiXrkkUf0j3/8Q3FVquiDjz/Wju++U6XznsBP9e2rp599VsGlSumW669Xenq6vtmyRanHjyvx4YdzvKygwEA1atRIEyZMUKVKlfTzzz9rxIgRuR5zTjz55JMaNWqUYmNj3Xtxmzdv1sxzH/YVpKzHwSeffKK2bdsqKCjI/edM9913nwYMGKDp06d7fHJekPjUuwhyZmYqw+m86D9nHk7VOnz4cA3o3VuTp01T45Yt1bFbNy3//HPFnItEn8GD9VSfPqp37r2twX37qlyZMur3zDOe8xk4UE+PHKmb27XTTz//rNnTp7v3AGrXqqXly5dr9+7dSkhIUOPGjTVy5EiPl8wvvvii2rdvryeeeEL16tVTr169dOrUKUlS+fLlNXToUA0dOlQxMTHuT8uHDx+uwYMHa/z48apfv77atWunpUuXqlKlSpJ+f2LNnj1bCxcuVKNGjfTGG2/o2WeftW6TqePGyZmZqZvvuEP/GjlSQ/r1y/V27dixo55++mkNGTJEN7drpx8OHtRD993nMc2DnTppypgxem/ePF3Xtq1uu/dezf7wQ8XkYW/otddek9PpVNOmTTVw4MACC2ViYqL69OmjwYMHq2HDhlqxYoXmzZunqudePRSkqKgojRgxQoMHD1Z4eLh69+7tvi4kJEQdOnRQyZIlL/gQqaBwcrEcKKyTi504cCB338wJD8/1N3Mu5eRXX65dqzvuu08pyckKucjjoCie/IqTi/195OTkYrfccotq166tF1988aLzyq+Ti/HSuwjx8/NTlehoZebwl0SJqKjL6uuLgM2xY8e0cuVKrVy5Uq+88kqhLZdQFjF+fn45vlOIJK40cXFxOnbsmMaNG6caNWoU2nIJJXLsusaN9eu57wQD3rA3D3+ylh/4MAcALAilFxXEH/8C+EN+PccIpRc4HA4ZY3TmzBlvDwW4rGX92dmlvp/Pe5Re4HA45OPj4/6ea2BgoBwOh9Jz+beRPtkcyssmt8vIi7yMq6DlZb3Zvt6R9edBWc+TvDDG6NSpUzpy5IhCQ0Mv+KpobhFKLylWrJgyMjL0008/ub+Hm3H8eO7mce63ZW7kdhl5kZdxFbS8rDfb1zuMMTLGyN/f/5IPdhEaGupxRKS8IpRe4nA45O/v735QSNLeefNyNY9qjz6a6+Xmdhl5kZdxFbS8rDfb1zsyMjKUnp6u2NhYjy9k5FaxYsUueU8yC6H0MofD4f6t6Tx2LFe3zcvLktwuIy/y+nKpIOVlvdm+3pH1fAgICLjg2zTeQij/xvLy1cqiKL+/IppfLpftWxiK6n2YXy7vX00AkA8IJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFn7eHgAuP1uTkrw9BCBfsUcJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABZ+3h7ApdqalOTtIWSrqI7rcsH2RWFijxIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWhBIALAglAFgQSgCwIJQAYEEoAcCCUAKABaEEAAtCCQAWft4eAIDc2ZqU5O0hXHHYowQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEgAsCCUAWBBKALAglABgQSgBwIJQAoAFoQQAC7+cTGSMkSSlpaUV6GDOnDmjU+np8jFGPueWCeDy9Ouvv2Z7udPplNPpVFpamtLT0wts+Vk9MzlojcPkYKoffvhB0dHRlz4yAChiDhw4oAoVKlx0mhyF0uVy6eDBgypVqpQcDke+DfDP0tLSFB0drQMHDig4OLjAloOii8cACusxYIzRiRMnFBUVJR+fi78LmaOX3j4+Ptbi5qfg4GCeJFc4
HgMojMdASEhIjqbjwxwAsCCUAGBRpEIZEBCg4cOHKyAgwNtDgZfwGEBRfAzk6MMcALiSFak9SgAoigglAFgQSgCwIJQAYFFkQvnyyy+rUqVKCgwMVHx8vP73v/95e0jIJyNGjJDD4fD4V7NmTff1Z86cUWJiosqUKaOSJUuqQ4cO+umnnzzmsX//ft16660qXry4ypUrp4EDB8rpdBb2qiCHVq1apdtvv11RUVFyOBxasGCBx/XGGA0bNkyRkZEKCgpSQkKCvvvuO49pfv31V3Xu3FnBwcEKDQ1V9+7ddfLkSY9pNm/erObNmyswMFDR0dF6/vnnC2R9ikQo586dq379+mn48OH6+uuvVa9ePbVq1UpHjhzx9tCQT2rXrq1Dhw65/3355Zfu65588kktXLhQH3zwgb744gsdPHhQd911l/v6zMxM3XrrrUpPT9dXX32lt956SzNnztSwYcO8sSrIgd9++0316tXTyy+/nO31zz//vF588UW9+uqrWrdunUqUKKFWrVrpzJkz7mk6d+6srVu3asWKFfrkk0+0atUqPfLII+7r09LS1LJlS8XExGjjxo0aP368RowYoddffz3/V8gUAY0aNTKJiYnunzMzM01UVJQZM2aMF0eF/DJ8+HBTr169bK9LTU01xYoVMx988IH7su3btxtJZs2aNcYYYxYvXmx8fHzM4cOH3dNMmzbNBAcHm7Nnzxbo2HHpJJn58+e7f3a5XCYiIsKMHz/efVlqaqoJCAgws2fPNsYYs23bNiPJrF+/3j3NkiVLjMPhMD/++KMxxphXXnnFhIWFeTwGnnrqKVOjRo18Xwev71Gmp6dr48aNSkhIcF/m4+OjhIQErVmzxosjQ3767rvvFBUVpcqVK6tz587av3+/JGnjxo3KyMjwuP9r1qypihUruu//NWvWqG7dugoPD3dP06pVK6WlpWnr1q2FuyK4ZCkpKTp8+LDHfR4SEqL4+HiP+zw0NFQNGzZ0T5OQkCAfHx+tW7fOPc31118vf39/9zStWrXSzp07dezYsXwds9dD+csvvygzM9PjSSBJ4eHhOnz4sJdGhfwUHx+vmTNnaunSpZo2bZpSUlLUvHlznThxQocPH5a/v79CQ0M9bnP+/X/48OFsHx9Z1+HvJes+u9hz/vDhwypXrpzH9X5+fipdurRXHhc5OnoQcCnatGnj/v8111yj+Ph4xcTE6P3331dQUJAXRwbkjNf3KMuWLStfX98LPuX86aefFBER4aVRoSCFhoaqevXq2r17tyIiIpSenq7U1FSPac6//yMiIrJ9fGRdh7+XrPvsYs/5iIiICz7MdTqd+vXXX73yuPB6KP39/dWgQQN9+umn7stcLpc+/fRTNWnSxIsjQ0E5efKk9uzZo8jISDVo0EDFihXzuP937typ/fv3u+//Jk2aaMuWLR5PnBUrVig4OFhXX311oY8flyY2NlYREREe93laWprWrVvncZ+npqZq48aN7mk+++wzuVwuxcfHu6dZtWqVMjIy3NOsWLFCNWrUUFhYWP4OOt8/HsqDOXPmmICAADNz5kyzbds288gjj5jQ0FCPTznx99W/f3+zcuVKk5KSYlavXm0SEhJM2bJlzZEjR4wxxjz66KOmYsWK5rPPPjMbNmwwTZo0MU2aNHHf3ul0mjp16piWLVua5ORks3TpUnPVVVeZp59+2lurBIsTJ06Yb775xnzzzTdGkpk0aZL55ptvzL59+4wxxowdO9aEhoaajz/+2GzevNm0a9fOxMbGmtOnT7vn0bp1axMXF2fWrVtnvvzyS1OtWjVz7733uq9PTU014eHh5oEHHjDffvutmTNnjilevLh57bXX8n19ikQojTFm6tSppmLFisbf3980atTIrF271ttDQj7p1KmTiYyMNP7+/qZ8+fKmU6dOZvfu3e7rT58+bXr16mXCwsJM8eLFTfv27c2hQ4c85rF3717Tpk0bExQUZMqWLWv69+9vMjIyCntVkEOff/65kXTBvy5duhhjfv8ToaFDh5rw8HATEBBgbrnlFrNz506PeRw9etTce++9pmTJkiY4ONh069bNnDhxwmOaTZs2meuuu84EBASY8uXLm7FjxxbI+nCYNQCw8Pp7lABQ1BFKALAglABgQSgBwIJQAoAFoQQAC0IJABaEEjk2YsQI1a9f39vDyLVKlSpp8uTJlzSPmTNnehzh6O+6LZA3hPIytnLlSjkcjgsOOJFXAwYM8Ph+7pWsoLaF7RQK8A5CCStjjJxOp0qWLKkyZcpc0rzOP4BBfkznLfmxLbJjO4UCvINQetGNN96o3r17q3fv3goJCVHZsmU1dOhQnf+t0mPHjunBBx9UWFiYihcvrjZt2nichGnfvn26/fbbFRYWphIlSqh27dpavHix9u7dq5tuukmSFBYWJofDoa5du0r6/ehMY8aMUWxsrIKCglSvXj3NmzfPPc+sPdElS5aoQYMGCggI0JdffnnBy02Xy6XnnntOFSpUUEBAgOrXr6+lS5e6r9+7d68cDofmzp2rG264QYGBgXr33Xez3RYOh0PTpk3THXfcoRIlSigpKUmZmZnq3r27e5w1atTQlClTPG7XtWtX3XnnnZowYYIiIyNVpkwZJSYmXjS0b7zxhkJDQy+6Rzhz5kxVrFhRxYsXV/v27XX06FGP6/+8LbLGMXr0aIWHhys0NFTPPfecnE6nBg4cqNKlS6tChQqaMWPGXy5T+v3YnaNGjVL79u0vOh0KWYF8gxw5csMNN5iSJUuaJ554wuzYscO88847pnjx4ub11193T3PHHXeYWrVqmVWrVpnk5GTTqlUrU7VqVZOenm6MMebWW281LVq0MJs3bzZ79uwxCxcuNF988YVxOp3mww8/NJLMzp07zaFDh0xqaqoxxphRo0aZmjVrmqVLl5o9e/aYGTNmmICAALNy5UpjzB8HNLjmmmvM8uXLze7du83Ro0cvOPfNpEmTTHBwsJk9e7bZsWOHGTRokClWrJjZtWuXMcaYlJQUI8lUqlTJfPjhh+b77783Bw8ezHZbSDLlypUzb775ptmzZ4/Zt2+fSU9PN8OGDTPr168333//vXv7zJ071327Ll26mODgYPPoo4+a7du3m4ULF16wDWNiYswLL7xgjDFm3LhxpkyZMmbdunV/eb+sXbvW+Pj4mHHjxpmdO3eaKVOmmNDQUBMSEuKe5s/bokuXLqZUqVImMTHR7Nixw/z73/82kkyrVq1MUlKS2bVrlxk5cqQpVqyYOXDgwEUeFZ7b5PxzzcB7CKUX3XDDDaZWrVrG5XK5L3vqqadMrVq1jDHG7Nq1y0gyq1evdl//yy+/mKCgIPP+++8bY4ypW7euGTFiRLbzzwresWPH3JedOXPGFC9e3Hz11Vce03bv3t19CKus2y1YsMBjmj/HISoqyiQlJXlMc+2115pevXoZY/4I5eTJk63bQpLp27evdbrExETToUMH989dunQxMTExxul0ui/r2LGj6dSpk/vnrFAOGjTIREZGmm+//faiy7j33ntN27ZtPS7r1KmTNZQxMTEmMzP
TfVmNGjVM8+bN3T87nU5TokQJ9wm0bAhl0cGpILyscePGcjgc7p+bNGmiiRMnKjMzU9u3b5efn5/7QKWSVKZMGdWoUUPbt2+XJPXp00ePPfaYli9froSEBHXo0EHXXHPNXy5v9+7dOnXqlFq0aOFxeXp6uuLi4jwuO//ETn+WlpamgwcPqlmzZh6XN2vWTJs2bcrxfGzTvfzyy3rzzTe1f/9+nT59Wunp6Rd82ly7dm35+vq6f46MjNSWLVs8ppk4caJ+++03bdiwQZUrV77oOLZv337BS98mTZp4vK2Qndq1a8vH5493s8LDw1WnTh33z76+vipTpgynYf4b4j3Kv7mHH35Y33//vR544AFt2bJFDRs21NSpU/9y+qwTyC9atEjJycnuf9u2bfN4n1KSSpQokS9jzOl8/jzdnDlzNGDAAHXv3l3Lly9XcnKyunXrpvT0dI/pihUr5vGzw+GQy+XyuKx58+bKzMzU+++/n4c1yJnsxpGTsaHoI5RelnXqzSxr165VtWrV5Ovrq1q1asnpdHpMc/ToUe3cudPjFAjR0dF69NFH9dFHH6l///6aPn26JLlP45mZmeme9uqrr1ZAQID279+vqlWrevyLjo7O8biDg4MVFRWl1atXe1y+evXqfDs9w+rVq9W0aVP16tVLcXFxqlq1qvbs2ZOneTVq1EhLlizR6NGjNWHChItOW6tWrWzvF1y5eOntZfv371e/fv3Us2dPff3115o6daomTpwoSapWrZratWunHj166LXXXlOpUqU0ePBglS9fXu3atZMk9e3bV23atFH16tV17Ngxff7556pVq5YkKSYmRg6HQ5988onatm2roKAglSpVSgMGDNCTTz4pl8ul6667TsePH9fq1asVHBysLl265HjsAwcO1PDhw1WlShXVr19fM2bMUHJy8l9+sp1b1apV06xZs7Rs2TLFxsbq7bff1vr16xUbG5un+TVt2lSLFy9WmzZt5Ofnp759+2Y7XZ8+fdSsWTNNmDBB7dq107Jly6wvu/PLyZMntXv3bvfPKSkpSk5OVunSpVWxYsVCGQMuxB6llz344IM6ffq0GjVqpMTERD3xxBN65JFH3NfPmDFDDRo00G233aYmTZrIGKPFixe7X9JlZmYqMTFRtWrVUuvWrVW9enW98sorkqTy5cvr2Wef1eDBgxUeHq7evXtLkkaOHKmhQ4dqzJgx7tstWrQo1wHq06eP+vXrp/79+6tu3bpaunSp/vOf/6hatWr5sm169uypu+66S506dVJ8fLyOHj2qXr16XdI8r7vuOi1atEjPPPPMX75F0bhxY02fPl1TpkxRvXr1tHz5cj3zzDOXtNyc2rBhg+Li4tzvF/fr109xcXEaNmxYoSwf2eNUEF504403qn79+pf89ToABYs9SgCwIJQAYMFLbwCwYI8SACwIJQBYEEoAsCCUAGBBKAHAglACgAWhBAALQgkAFoQSACz+HwDluvgVdB08AAAAAElFTkSuQmCC\n", "text/plain": [ "
" ] @@ -858,14 +919,14 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 18, "id": "7de26848", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "d7d78236107c4e46bc6bfe7333cd3de0", + "model_id": "", "version_major": 2, "version_minor": 0 }, @@ -878,7 +939,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAIWCAYAAACFnY2vAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB9hklEQVR4nO3dd3gU1R7G8e8mpID03ntTaVKlCYEAAtJRei8qWOiIIFVEihQBQRCkSxNQeglFQASpIghIDQKhRQgJkLZz/xjZa6Rlk91syvt5njzuzs6c/DKXS17OnGIxDMNARERExEXcXF2AiIiIJG0KIyIiIuJSCiMiIiLiUgojIiIi4lIKIyIiIuJSCiMiIiLiUgojIiIi4lIKIyIiIuJSCiMiIiLiUgojIiIi4lIuDSM//fQTDRo0IHv27FgsFtasWfPca3bu3Enp0qXx8vKiYMGCzJs3z+l1ioiIiPO4NIyEhIRQsmRJpk+fHq3zL1y4QP369fHx8eHo0aP06tWLrl27snnzZidXKiIiIs5iiS8b5VksFlavXk3jxo2fes7AgQNZv349v//+u+1Yy5YtuXPnDps2bYqDKkVERMTRkrm6AHvs27cPX1/fKMfq1KlDr169nnpNaGgooaGhtvdWq5XAwEAyZMiAxWJxVqkiIiKJjmEY3Lt3j+zZs+Pm5riHKwkqjAQEBJAlS5Yox7JkyUJQUBAPHjwgefLkj10zZswYRowYEVclioiIJHqXL18mZ86cDmsvQYWRmBg0aBB9+vSxvb979y65c+fm8uXLpE6d2oWViYiIxH/Xr1+nUiVfbt3yB/IAl0iVKpVDv0eCCiNZs2bl+vXrUY5dv36d1KlTP7FXBMDLywsvL6/HjqdOnVphRERE5Dlu336BoKCqwH46d17L3LkvOnyYQ4JaZ6RixYr4+flFObZ161YqVqzooopEREQSr4gI6NDBnbCw+ZQq9TNjx2Z3yvdxac9IcHAwZ8+etb2/cOECR48eJX369OTOnZtBgwZx5coVFixYAMA777zDtGnTGDBgAJ07d2b79u0sX76c9evXu+pHEBERSXTOnz/PjBkzSJ78c3bvdidVKndWrsyEp2eQU76fS8PIwYMH8fHxsb1/NLajQ4cOzJs3j2vXruHv72/7PF++fKxfv57evXszZcoUcubMyTfffEOdOnXivHYREZHE6Ny5c1SvXp2//voL8AZGMXMmFCgAQc7JIvFnnZG4EhQURJo0abh79+5Tx4wYhkFERASRkZFxXF3i4u7uTrJkyTSFWkQkgTh79iw+Pj789ddfJEtWlIiIHXTsmJVvvzU/j87v0JhIUANY40JYWBjXrl3j/v37ri4lUUiRIgXZsmXD09PT1aWIiMgz/Pnnn/j4+HDlyhVSpnyR4OAdFC6chalTnf+9FUb+xWq1cuHCBdzd3cmePTuenp76V30MGYZBWFgYN2/e5MKFCxQqVMihC+SIiIjj/Pnnn1SvXp2rV6+SNetLBARsx9MzC0uXQsqUzv/+CiP/EhYWhtVqJVeuXKRIkcLV5SR4yZMnx8PDg0uXLhEWFoa3t7erSxIRkf8ICwujTp06XL16lQIFXuby5e1AZsaNg1deiZsa9E/VJ9C/4B1H91JEJH7z9PRk6tSpvPJKGdzcdhAWlpn69eGDD+KuBv2mEBERSYL+PX+lfv36lC59gD//zES2bPDttxCXoxQURkRERJKYP/74g/Lly3Pu3DkAli+HOXPcsFhg8WLIlClu61EYERERSUJOnjxJ9erVOXjwIL169eLiReje3fzs44/hX8t/xRmFkUTAYrE882v48OG2c4sWLYqXlxcBAQGPtVO9enXbNd7e3hQuXJgxY8ZE6cq7ePFilLYzZMhA7dq1OXLkSFz8qCIiEgsnTpygevXq3Lhxg1KlSjF79jxatYK7d6FiRRg2zDV1KYwkAteuXbN9TZ48mdSpU0c51q9fPwD27NnDgwcPaN68OfPnz39iW926dePatWucPn2aQYMGMXToUGbOnPnYedu2bePatWts3ryZ4OBg6taty507d5z5Y4qISCz8/vvv+Pj4cPPmTV555RX8/PyYOjUDv/wCadLAkiXg4eGa2hRGnsMwICTENV/RXRs3a9astq80adJgsViiHEv5zyTxOXPm0Lp1a9q1a8fcuXOf2FaKFCnImjUrefLkoVOnTpQoUYKtW7c+dl6GDBnImjUrZcuWZcKECVy/fp39+/fH+D6LiIjzHD9+3BZESpcuzbZt2zh6ND1jxpiff/MN5M3ruvq0zshz3L8fNwu+PElwMLzwgmPaunfvHitWrGD//v0ULVqUu3fvsnv3bqpWrfrE8w3DYM+ePZw6dYpChQo9s+3kyZMD5lx1ERGJf/r27cutW7coU6YMW7duJSIiHW3bmv/o7d4dmjd3bX3qGUkili5dSqFChXj55Zdxd3enZcuWzJkz57HzvvrqK1KmTImXlxevvfYaVquVD54x2fzOnTuMGjWKlClTUr58eWf+CCIiEkPfffcdnTp1Ytu2baRJk46OHeHaNXjpJZg0ydXVqWfkuVKkMHsoXPW9HWXu3Lm0bdvW9r5t27ZUq1aNqVOnkipVKtvxNm3aMHjwYP7++2+GDRtGpUqVqFSp0mPtVapUCTc3N0JCQsifPz/Lli0jS5YsjitYRERiJTAwkPTp0wPmo/VHj+cnT4YNG8DbG5YudezvmphSGHkOi8Vxj0pc5eTJk/zyyy8cOHCAgQMH2o5HRkaydOlSunXrZjuWJk0aChYsCMDy5cspWLAgr776Kr6+vlHaXLZsGS+99BIZMmQgbdq0cfJziIhI9Bw+fJhatWoxatQoevToYTu+bx8MGGC+njgRihd3UYH/occ0ScCcOXN47bXXOHbsGEePHrV99enT54mPah5JmTIlH374If369YsyvRcgV65cFChQQEFERCSeOXz4ML6+vgQGBrJw4UIiIiIAOHcOGjaE8HBo1gzeecfFhf6LwkgiFx4ezsKFC2nVqhXFihWL8tW1a1f279/PiRMnnnr922+/zZkzZ/j+++/jsGoREYmJQ4cOUbNmTf7++29effVVNm/eTLJkyQgMhPr14dYtKFMG5s+P2+Xen0dhJJH78ccfuX37Nk2aNHnssxdffJEXX3zxmb0j6dOnp3379gwfPhyr1erMUkVEJBZ+/fVXfH1
9uXPnDhUrVmTz5s2kTp2asDBo2hROn4ZcuWDt2vg3/MBi/Lf/PZELCgoiTZo03L17l9SpU0f57OHDh1y4cIF8+fJpu3sH0T0VEXG+AwcOULt2be7evUvlypXZuHEjqVKlwjCgQwdYuBBSpYK9e2M3TuRZv0NjQz0jIiIiCdzOnTu5e/cuVapUsQURgFGjzCDi7g4rV8afAav/pdk0IiIiCdyAAQPIlCkTzZs3twWRRYv+v9fMjBlQu7YLC3wO9YyIiIgkQEePHiX4XwthderUyRZEfvoJunQxjw8YAP9awSFeUhgRERFJYPbt28drr71G/fr1CQkJifLZmTPQpAmEhZlTeB/tPxOfKYyIiIgkID///DO1a9fm3r17uLlF/TV+6xbUqweBgVChgjlexC0B/KZPACWKiIgIwJ49e6hTpw7BwcHUqFGD9evX88I/83QfPoTGjc3FzfLmhR9+gH/2MY33FEZEREQSgN27d/P6668THBxMzZo1Wbt2LSn+2VjGaoVOncypu2nSmHvPJKTtwhRGRERE4rndu3dTt25dQkJC8PX1jRJEAIYONTe9S5YMVq2CF190YbExoKm9IiIi8VyqVKnw8vKiUqVK/PDDDyT/1/OXb7+F0aPN17NmQY0aLioyFtQzkkhUr16dXr16PfXzvHnzMnnyZId+T2e0KSIijytVqhR79+59LIhs3w7du5uvBw82H9UkROoZSSRWrVqFh4eHq8sQEREH2bFjBx4eHlSpUgWAokWLRvn85Elzz5mICGjZEkaOdEWVjqEwkkikT5/e1SWIiIiD+Pn50aBBA9zd3dm7dy8lSpSI8vn16+YuvHfvQuXK5qOahDCF92kScOlxKyQk5KlfDx8+jPa5Dx48iNa59vr3Y5obN27QoEEDkidPTr58+Vi8ePFj59+5c4euXbuSKVMmUqdOTY0aNTh27Jjt83PnztGoUSOyZMlCypQpKVeuHNu2bbO7LhERsc+2bdt44403ePDgAdWqVaNIkSJRPr9/Hxo2hIsXoUABWLMGEvo+pOoZiaaUKVM+9bN69eqxfv162/vMmTNz//79J55brVo1du7caXufN29ebt269dh5sdlMuWPHjly9etXWxffBBx9w48aNKOe8+eabJE+enI0bN5ImTRq+/vpratasyZkzZ0ifPj3BwcHUq1eP0aNH4+XlxYIFC2jQoAGnT58md+7cMa5NRESebuvWrTRs2JCHDx/yxhtvsHLlSry8vGyfR0ZC27Zw4ACkS2dO4c2Y0YUFO4jCSCJz5swZNm7cyIEDByhXrhwAc+bM4cV/zfPas2cPBw4c4MaNG7Y/5BMmTGDNmjWsXLmS7t27U7JkSUqWLGm7ZtSoUaxevZoff/yR9957L25/KBGRJGDLli00bNiQ0NBQGjRowIoVK6IEEcOAHj1g9Wrw9DR7RAoXdl29jqQwEk3/3ozov9zd3aO8/28vxL/9d+neixcvxqqu//rjjz9IliwZZcqUsR0rWrQoadOmtb0/duwYwcHBZMiQIcq1Dx484Ny5c4D58w4fPpz169dz7do1IiIiePDgAf7+/g6tV0RE4MCBA7Yg0qhRI5YvX46np2eUc0aMMKfuWiyweDG89pqLinUChZFoerTcrivPdZTg4GCyZcsW5XHRI49CS79+/di6dSsTJkygYMGCJE+enObNmxMWFha3xYqIJAElS5bE19cXDw8Pli1b9lgQmTHDDCMA06dD8+YuKNKJFEYSmaJFixIREcGhQ4dsj2lOnz7NnTt3bOeULl2agIAAkiVLRt68eZ/Yzt69e+nYsSNNmjQBzADj6F4cERExeXl58f3332OxWB4LIitXQs+e5uuhQ+Hdd11QoJNpNk0iU6RIEV5//XXefvtt9u/fz6FDh+jatWuURXJ8fX2pWLEijRs3ZsuWLVy8eJGff/6ZwYMHc/DgQQAKFSrEqlWrOHr0KMeOHaN169ZYrVZX/VgiIonOunXr6N+/v23CgpeX12NBZOdOaNPGHC/SvTsMHx73dcYFhZFE6NtvvyV79uxUq1aNpk2b0r17dzJnzmz73GKxsGHDBl577TU6depE4cKFadmyJZcuXSLLPzsrTZw4kXTp0lGpUiUaNGhAnTp1KF26tKt+JBGRRGXt2rU0bdqUCRMmsHDhwieec/QoNGoEYWHm4mZffWWOF0mMLEZs5pAmQEFBQaRJk4a7d++SOnXqKJ89fPiQCxcukC9fPrwT+qTteEL3VEQkqh9//JHmzZsTHh7Om2++yeLFix9bQfv8eXMxs4AAc6Dq5s3xYy2RZ/0OjQ31jIiIiMSRNWvW2ILIW2+9xZIlSx4LIjduQJ06ZhApUQJ++CF+BBFnUhgRERGJA6tXr+bNN98kPDycli1bsnjxYpIlizqP5N49qFcPzp6FvHlh40b418oMiZbCiIiIiJNdvXqVVq1aERERQevWrVm4cOFjQSQ0FJo0gUOHzFVVN2+G7NldVHAc09ReERERJ8uePTtz5sxh69atzJkz57HFMq1W6NAB/PzghRfMHpHEsrpqdCiMPEESG9PrVLqXIpKUhYeH28aEtGnThjZt2jx2jmFAr16wbBl4eJjLvZctG8eFupge0/zLoz8wT9vkTuz36F7+d4CWiEhit3z5ckqVKsXVq1efed7nn8PUqebr+fOhVq04KC6eUc/Iv7i7u5M2bVrb3jIpUqTAklgndTuZYRjcv3+fGzdukDZt2se6JEVEErNly5bRpk0bIiMj+eqrr/j000+feN6cOfDxx+bryZOhVau4qzE+URj5j6xZswLP3uxOoi9t2rS2eyoikhR89913tG3bFqvVSqdOnRjxaFOZ//jxR3NVVYCPPoIPP4zDIuMZhZH/sFgsZMuWjcyZMxMeHu7qchI0Dw8P9YiISJKyZMkS2rVrh9VqpXPnzsyePfux3doB9u6FFi3MgaudOsFnn7mg2HhEYeQp3N3d9YtURESibdGiRXTo0AGr1UrXrl35+uuvnxhETp+GN96Ahw/N/86alXiXeY8uDWAVERGJpbCwMEaPHo3VaqVbt25PDSJ37kDDhuZ/K1Y0Z9AkU7eAekZERERiy9PTk23btjF79myGDh36xCASGQktW8KZM5A7N6xZAylSxH2t8ZF6RkRERGLo0qVLttc5cuRg+PDhTwwiAAMHmquqpkhh7jfzr83UkzyFERERkRj49ttvKViwIN99991zz12wAL74wnw9bx6UKuXU0hIchRERERE7zZkzhy5duhAREcEvv/zyzHP37///FN5PPoE334yDAhMYhRERERE7fPPNN3Tt2hXDMHjvvfeYPHnyU8+9csXc/C40FBo3huHD46rKhEVhREREJJpmzZpFt27dAHj//ff58ssvn7pS94MHZhC5dg2KFTMf1TxlOEmSp9siIiISDV9//TVvv/02AB9++CFTpkx5ahAxDPPRzK+/Qvr05oDVVKnistqERWFEREQkGk6ePAlA7969mTRp0jP3LpswARYtAnd3WLkS8uePqyoTJq0zIiIiEg2TJ0/Gx8eHRo0aPTOIbNhgTuMFmD
IFfHziqMAETD0jIiIiT7F27VpCQ0MBc++yxo0bPzOInDpl7rz76DFNjx5xVWnCpjAiIiLyBFOnTqVhw4a8+eabREREPPf8v/+GRo0gKAiqVoWpU7XnTHQpjIiIiPzHlClT+OCDDwB4+eWXn7txamSk2SPyaKn3lSvB0zMuKk0cFEZERET+ZfLkyfTq1QuAQYMG8dlnnz3z0QxoqffYUhgRERH5x6RJk+jduzcAgwcPZvTo0c8NIlrqPfYURkRERIAvv/ySPn36APDJJ58watSo5wYRLfXuGJraKyIiApQuXZoXXniBvn37Mnz48OcGES317jgKIyIiIkCVKlU4ceIEefLkee65/13qfeFCLfUeG7p1IiKSZE2ePJljx47Z3kcniBgGdOtmLvWeIQP8+COkTOnMKhM/hREREUmSPvvsM3r37k3NmjW5ceOGHdfB4sWQLBmsWAH58jmxyCRCYURERJKcTz/9lMGDBwPmXjOZozkXd+VKGDLEfD1tmpZ6dxSNGRERkSRl1KhRDB06FDB7RwYNGhSt6w4dgvbtzdcffgj/bOArDqCeERERSTJGjBhhCyJjxoyJdhC5cgUaNjQHrtat+/91RcQx1DMiIiJJwrx58xj+z/zbsWPHMmDAgGhdd/++uefM1avw0kuwdCk8Z3V4sZPCiIiIJAnNmzdn7ty5NGjQgP79+0frGqvVfDRz6BBkzAjr1kHq1E4uNAlSGBERkSQhZcqU+Pn54eHhEe1rhg2D7783N71bvVozZ5xFY0ZERCRRMgyDIUOGMGrUKNsxe4LI4sXw6afm61mzoEoVR1coj6hnREREEh3DMBg8eDBjxowBoE6dOpQvXz7a1+/bB126mK8HDoQOHZxRpTyiMCIiIomKYRgMGjSIsWPHAjBlyhS7gsilS+ZeM4/2nPnsM+fUKf+nMCIiIomGYRgMHDiQ8ePHAzB16lTee++9aF9/7x688QbcuAGlSmnPmbiiMCIiIomCYRj079+fL/5ZBGTatGn07Nkz2tdHRkLr1vD775A1q/aciUsKIyIikijs3bvXFkSmT59Ojx497Lp+4EBz6q63N/zwA+TK5Ywq5Ulc3vk0ffp08ubNi7e3NxUqVODAgQPPPH/y5MkUKVKE5MmTkytXLnr37s3Dhw/jqFoREYmvqlSpwpdffsmMGTPsDiJz5vx/VdV588COISbiAC7tGVm2bBl9+vRh5syZVKhQgcmTJ1OnTh1Onz79xE2LlixZwkcffcTcuXOpVKkSZ86coWPHjlgsFiZOnOiCn0BERFzJMAxCQkJI+c/zlPfff9/uNnbuhHfeMV8PHw4tWjiuPokel/aMTJw4kW7dutGpUydeeuklZs6cSYoUKZg7d+4Tz//555+pXLkyrVu3Jm/evNSuXZtWrVo9tzdFREQSH8Mw+PDDD3nttdcIDAyMURtnz0KzZhARYYaQf7atkTjmsjASFhbGoUOH8PX1/X8xbm74+vqyb9++J15TqVIlDh06ZAsf58+fZ8OGDdSrV++p3yc0NJSgoKAoXyIikrAZhsEHH3zA1KlTOXLkCDt27LC7jTt3oEEDCAw0H8t8+y1YLI6vVZ7PZY9pbt26RWRkJFmyZIlyPEuWLJw6deqJ17Ru3Zpbt25RpUoVDMMgIiKCd955h48//vip32fMmDGMGDHCobWLiIjrGIbBe++9x1dffYXFYmH27Nk0a9bMrjbCw+Gtt+DUKciZE9asgeTJnVOvPJ/LB7DaY+fOnXz22Wd89dVXHD58mFWrVrF+/fooS/3+16BBg7h7967t6/Lly3FYsYiIOJLVaqVnz562IDJnzhy6PFoqNZoiI6FdO9i6FVKkMKfwZsvmpIIlWlzWM5IxY0bc3d25fv16lOPXr18na9asT7zmk08+oV27dnTt2hWA4sWLExISQvfu3Rk8eDBuT1iZxsvLCy8vL8f/ACIiEqesVis9evTg66+/xmKx8O2339LBznXarVbo1g2WLQMPD1ixAl55xUkFS7S5rGfE09OTMmXK4OfnZztmtVrx8/OjYsWKT7zm/v37jwUOd3d3wOy2ExGRxOvGjRusW7cOi8XCvHnz7A4ihgEffGCODXFzgyVL4BlDDiUOuXRqb58+fejQoQNly5alfPnyTJ48mZCQEDp16gRA+/btyZEjh22jowYNGjBx4kReeeUVKlSowNmzZ/nkk09o0KCBLZSIiEjilDVrVnbs2MHhw4dpYef8W8MwFzWbPt0cpDp/PjRv7qRCxW4uDSMtWrTg5s2bDB06lICAAEqVKsWmTZtsg1r9/f2j9IQMGTIEi8XCkCFDuHLlCpkyZaJBgwaMHj3aVT+CiIg4kdVq5bfffqNUqVIAFCpUiEKFCtndzsiR8M92NcycCW3bOrBIiTWLkcSebwQFBZEmTRru3r1L6tSpXV2OiIg8hdVqpWvXrixevJg1a9ZQt27dGLUzfjwMGGC+njQJevVyXI1JjbN+h2pvGhERiXciIyPp2rUr8+bNw83NjTt37sSonenT/x9ERo9WEImvFEZERCReiYyMpHPnzixYsAB3d3cWL15s9xgRMAeqvvee+frjj80viZ8URkREJN6IjIykU6dOLFy4EHd3d5YsWcJbb71ldztLl8I/q0DQqxd8+qlj6xTHUhgREZF4ITIyko4dO7Jo0SLc3d1ZunQpzWMw5eWHH8wBqlYrdO8OEydqmff4LkGtwCoiIolfsmTJWLZsWYyCyObN5jLvkZFmIJkxQ0EkIdBsGhERiTciIyM5ePAgFSpUsPvaXbugbl148MDciXfpUkim/n+HctbvUPWMiIiIy0RERDBjxgwiIyMBc1XtmASRX36BN94wg0j9+ubqqgoiCYfCiIiIuER4eDht2rShR48evP322zFu5+hRs0ckOBhq1oSVK8HT03F1ivMpN4qISJwLDw+ndevWrFy5Eg8PDxo2bBijdk6ehFq14M4dqFzZHLzq7e3YWsX5FEZERCROhYeH06pVK77//ns8PT1ZuXIlDRo0sLudP/8EX1+4dQvKloX16+GFF5xQsDidHtOIiEicCQ8Pp2XLlrYgsmrVKruDiGGYG92VLQvXrkHx4rBpE6RJ46SixekURkREJM60b9+eVatW4enpyerVq6lfv75d19+4AU2bQseOEBQEr74KW7dChgzOqVfihsKIiIjEmXbt2pE6dWrWrFlDvXr17Lp29WooVgzWrAEPD/jsM9i9G/7Z6F0SMI0ZERGROFOvXj0uXLhA+vTpo33NnTvw4YewYIH5vnhxWLgQSpZ0To0S99QzIiIiThMaGkr37t05e/as7Zg9QWTbNjN8LFgAbm4wcCD8+quCSGKjnhEREXGK0NBQmjVrxvr169m1axcnTpwgWTRXIrt/Hz76CKZONd8XKGAOWq1c2YkFi8sojIiIiMM9fPiQZs2asWHDBpInT85XX30V7SCyfz+0bw9nzpjv330Xxo2DlCmdWLC4lB7TiIiIQz18+JCmTZvagsi6deuoWbPmc68LC4MhQ6BSJTOIZM9uTtn96isFkcROPSMiIuIwDx8+pEmTJ
mzatInkyZOzfv16fHx8nnvd779Du3bm0u4AbdqYj2jSpXNuvRI/qGdEREQcZtCgQWzatIkUKVKwYcOG5waRyEgYPx7KlDGDSIYMsGIFLFqkIJKUKIyIiIjDfPLJJ1SpUoUNGzZQvXr1Z54bGAg+PjBggPmI5o03zB6S5s3jplaJP/SYRkREYiUyMhJ3d3fAnLb7008/YbFYnnnNw4fQqBHs2WOOB5kyBTp1gudcJomUekZERCTG7t+/T506dfjqq69sx54XRKxWc3zInj3mfjI//wydOyuIJGUKIyIiEiMhISE0aNAAPz8/PvroI27cuBGt6/r1g5UrzSXdV682FzWTpE2PaURExG4hISG88cYb7Ny5k5QpU7Jx40YyZ8783OsmTTK/AObNM8eMiCiMiIiIXUJCQqhfvz67du0iVapUbNq0iUqVKj33uhUroG9f8/XYsdC6tZMLlQRDYURERKItODiY+vXr89NPP5E6dWo2b97Mq6+++tzrdu82x4kYBvTsCf37x0GxkmAojIiISLR9//33tiCyZcsWKlSo8Nxr/vjDnDkTGmr+d8oUDVaVqBRGREQk2jp06EBAQAA+Pj6UL1/+uedfuwZ168Lff8Orr8KSJfDPLGARG4thGIari4hLQUFBpEmThrt375I6dWpXlyMiEu/du3cPi8VCSjs3iLl3D6pVgyNHoFAhcwpvxoxOKlLihLN+h2pqr4iIPFVQUBB16tShfv36hISERPu68HB4800ziGTKBBs3KojI0ymMiIjIE929e5c6deqwb98+jh8/zoULF6J1nWHA22/D5s2QIgWsXw8FCji5WEnQNGZEREQe8yiI7N+/n3Tp0rFt2zaKFSsWrWtHjIBvvwU3N1i+HMqVc3KxkuCpZ0RERKK4c+cOtWvXZv/+/aRPnx4/Pz9Kly4drWvnzDHDCMCMGVC/vhMLlURDPSMiImLzKIj8+uuvZMiQAT8/P0qWLBmtazduNB/PAAwZAt27O7FQSVTs7hk5f/68M+oQEZF44MqVK5w7d87uIHLokDlgNTIS2reHkSOdXKgkKnaHkYIFC+Lj48OiRYt4+PChM2oSEREXefnll/Hz82P79u3RDiIXLpiPY0JCoFYtmD1bi5qJfewOI4cPH6ZEiRL06dOHrFmz8vbbb3PgwAFn1CYiInEgMDAwyt/jpUqVokSJEtG69vZtc1Gz69ehZElzN15PT2dVKomV3WGkVKlSTJkyhatXrzJ37lyuXbtGlSpVKFasGBMnTuTmzZvOqFNERJwgMDAQX19fatasyd69e+26NjIS3noLTp+GXLlgwwbQWpISEzGeTZMsWTKaNm3KihUrGDt2LGfPnqVfv37kypWL9u3bc+3aNUfWKSIiDnb79m1q1qzJkSNHSJEiBWnTprXr+gkTYPt2cy2RjRshe3bn1CmJX4zDyMGDB+nRowfZsmVj4sSJ9OvXj3PnzrF161auXr1Ko0aNHFmniIg40K1bt6hZsyZHjx4lS5Ys7Nixg5dffjna1x86ZM6YAfjyS7DjUpHH2D21d+LEiXz77becPn2aevXqsWDBAurVq4ebm5lr8uXLx7x588ibN6+jaxUREQe4efMmNWvW5Pjx47Yg8uKLL0b7+pAQaN0aIiKgWTPo3NmJxUqSYHcYmTFjBp07d6Zjx45ky5btiedkzpyZOXPmxLo4ERFxrEePZo4fP07WrFnZsWMHRYsWtauNXr3gzBnImRNmzdLMGYk9u8PI1q1byZ07t60n5BHDMLh8+TK5c+fG09OTDh06OKxIERFxjJQpU5InTx5u3brFjh07KFKkiF3Xr1oF33xjBpAFCyB9eicVKkmK3WGkQIECXLt2jcyZM0c5HhgYSL58+YiMjHRYcSIi4lheXl6sXLmSa9eu2f04/coV6NbNfD1gAPj4OL4+SZrsHsBqGMYTjwcHB+Pt7R3rgkRExLGuX7/O2LFjbX9/e3l52R1ErFZzZdXAQChTRiusimNFu2ekT58+AFgsFoYOHUqKFClsn0VGRrJ//35KlSrl8AJFRCTmAgICqFGjBn/88QehoaEMHTo0Ru38exrvkiVa2EwcK9ph5MiRI4DZM3L8+HE8//Un0dPTk5IlS9KvXz/HVygiIjFy7do1atSowalTp8iZMydt2rSJUTv/ncZbuLADixTBjjCyY8cOADp16sSUKVNIrWX2RETirWvXruHj48Pp06fJlSsXO3bsoECBAna382gab3g4NG2qabziHHYPYP3222+dUYeIiDjI1atX8fHx4cyZM+TOnZsdO3aQP3/+GLXVu7c5jTdHDm2AJ84TrTDStGlT5s2bR+rUqWnatOkzz121apVDChMREfuFhYXh6+vLmTNnyJMnDzt27CBfvnwxamv16v8HkIULNY1XnCdaYSRNmjRY/onDadKkcWpBIiISc56engwZMoRPPvkEPz+/GK+GfeUKdO1qvu7fX9N4xbksxtPm6iZSQUFBpEmThrt372rci4gkWg8fPozxcgtWK9SuDX5+5jTen3/W7BkxOet3aIw3yhMRkfjB39+funXrcvXqVdux2Kz79MUXZhBJkQIWL1YQEeeL1mOaV155xfaY5nkOHz4cq4JERCT6/P39qV69OhcuXKBr165s2LAhVu0dPgyDB5uvp0wBO1eLF4mRaIWRxo0bO7kMERGx16VLl/Dx8eHChQsUKFCAr7/+Olbt/Xsab5Mm0KWLgwoVeY5ohZFhw4Y5uw4REbHDxYsX8fHx4eLFixQoUICdO3eSM2fOWLXZpw+cPq1pvBL3NGZERCSBuXDhAtWrV+fixYsUKlSIXbt2xTqIrFkDs2b9fzfeDBkcU6tIdESrZyR9+vScOXOGjBkzki5dumeOHwkMDHRYcSIi8rju3btz6dIlChcuzPbt28mRI0es2rt69f/TePv1gxo1HFCkiB2iFUYmTZpEqlSpAJg8ebIz6xERkeeYN28e77zzDl9//TXZs2ePVVuRkdChA9y+DaVLw6efOqhIETtonRERkQTgwYMHJE+e3KFthoVB+/awbJk5jffwYc2ekWdz1u9Qu/emAYiMjGT16tX88ccfALz00ks0atSIZMli1JyIiDzD2bNn8fX1ZcyYMbRq1cohbYaEmBvfbdkCHh7mcu8KIuIqdveMnDhxgoYNGxIQEECRf/7knjlzhkyZMrF27VqKFSvmlEIdRT0jIpKQ/Pnnn/j4+HDlyhWKFy/OoUOH8PDwiFWbgYFQvz788ovZI7JqFdSp46CCJVGLNyuwdu3alZdffpm//vqLw4cPc/jwYS5fvkyJEiXo3r27wwoTEUnqzpw5Q/Xq1bly5QovvfQSW7dujXUQuXoVqlUzg0i6dOZKqwoi4mp2P1c5evQoBw8eJF26dLZj6dKlY/To0ZQrV86hxYmIJFWnT5/Gx8eHa9eu8fLLL+Pn50eWLFli1ebZs1CrFly8CNmymY9o4nlntiQRdveMFC5cmOvXrz92/MaNGxQsWNAhRYmIJGWnTp2yBZFixYqxffv2WAeRY8egShUziBQsCHv3KohI/BGtMBIUFGT7
GjNmDB988AErV67kr7/+4q+//mLlypX06tWLsWPHOrteEZFE77vvvuPatWsUL16c7du3kzlz5li1t3u3+Wjm+nUoWRL27IF8+RxUrIgDRGsAq5ubW5SFzh5d8ujYv99HRkY6o06H0QBWEYnvDMNg/PjxdOrUiUyZMsWqrfXroXlzePjQ7BlZuxbSpnVMnZL0uHRq744dOxz2DUVE5HHnz58nR44ceHl5YbFYGDBgQKzbXLzYXNAsMtKcPbN8uTl7RiS+iVYYqVatmrPrEBFJsn7//Xdq1qxJhQoVWLFiBV5eXrFuc+pU+OAD83XbtjB3rrmeiEh8FKNVyu7cucOcOXNsi569/PLLdO7cmTRp0ji0OBGRxO7333+nRo0a3Lx5k7/++ov79+/HKowYBgwfDiNHmu8/+AAmTQI3bYsq8ZjdfzwPHjxIgQIFmDRpEoGBgQQGBjJx4kQKFCjA4cOHnVGjiEiidPz4cXx8fLh58yalS5dm27ZtUZZNsJfVCu+///8gMnIkTJ6sICLxn90rsFatWpWCBQsye/Zs2/LvERERdO3alfPnz/PTTz85pVBH0QBWEYkPjh07Rs2aNbl9+zZlypRhy5YtpE+fPsbthYVBx47w3XdgscC0adCjh+PqFQHn/Q61O4wkT56cI0eOULRo0SjHT548SdmyZbl//77DinMGhRERcbV/B5GyZcuyZcuWWPWI3L9vzpjZuBGSJYMFC8BBW9iIRBFvloNPnTo1/v7+jx2/fPkyqVKlckhRIiKJWXBwMA8fPqRcuXJs3bo1VkEkPBwaNTKDSPLk5tRdBRFJaOwewNqiRQu6dOnChAkTqFSpEgB79+6lf//+DttNUkQkMatcuTI7duygUKFCpI3Foh+GYY4R2bYNXngBNm+GypUdV6dIXLE7jEyYMAGLxUL79u2JiIgAwMPDg3fffZfPP//c4QWKiCQGhw8fxs3NjVKlSgE4ZC+vqVPh66/NMSJLliiISMJl15iRyMhI9u7dS/HixfHy8uLcuXMAFChQgBQJZCUdjRkRkbh26NAhfH19cXNzY/fu3bz00kuxbnPjRnjjDXMGzbhx0L+/AwoVeY54MWbE3d2d2rVrc+fOHVKkSEHx4sUpXrx4ggkiIiJx7eDBg/j6+nLnzh2KFi1Kzpw5Y93miRPQooUZRDp1gn79HFCoiAvZPYC1WLFinD9/3hm1iIgkKr/++qstiFSuXJlNmzbF+l+TN29CgwZw7x689hrMnGk+phFJyOwOI59++in9+vVj3bp1XLt2LcqOvkFBQc6oUUQkwTlw4AC1atXi7t27VKlShY0bN8Z6xmFoKDRtChcuQP788P334OnpoIJFXMjudUbc/rWU33938tWuvSIi5joir732GkFBQVStWpUNGzaQMmXKWLVpGNC5M8ybB6lTwy+/wIsvOqZekehy6a69/+boHXynT5/O+PHjCQgIoGTJkkydOpXy5cs/9fw7d+4wePBgVq1aRWBgIHny5GHy5MnUq1fPoXWJiMRUgQIFKFmyJBaLhfXr18c6iACMH28GETc3c/ddBRFJTOwKI4ZhkD17dsLCwihSpIhtOfiYWrZsGX369GHmzJlUqFCByZMnU6dOHU6fPk3mzJkfOz8sLIxatWqROXNmVq5cSY4cObh06VKs5umLiDhaypQp2bBhAxaLhRdeeCHW7a1ZAx99ZL6eMgXq1Il1kyLxSrQf01y4cIGGDRty8uRJAHLmzMn3339P2bJlY/zNK1SoQLly5Zg2bRoAVquVXLly8f777/PRo//n/cvMmTMZP348p06dwiOGe2HrMY2IOMPevXvZs2cPAwcOdGi7R4+a64fcv2/uNTN9ukObF7GLy6f29u/fn4iICBYtWsTKlSvJmTMnb7/9doy/cVhYmG3uva0YNzd8fX3Zt2/fE6/58ccfqVixIj179iRLliwUK1aMzz777JnjVEJDQzXIVkScas+ePbz++ut89NFHLFq0yGHtBgSYM2fu3wdfX7NXRCQxivZzlj179rBy5UqqVKkCwKuvvkrOnDkJCQmJUTfkrVu3iIyMJEuWLFGOZ8mShVOnTj3xmvPnz7N9+3batGnDhg0bOHv2LD169CA8PJxhw4Y98ZoxY8YwYsQIu+sTEYmO3bt3U7duXUJCQqhZsyZNmzZ1SLsPHph7zvz1FxQpAitWmJvgiSRG0e4ZuXHjBoUKFbK9z5YtG8mTJ+fGjRtOKexJrFYrmTNnZtasWZQpU4YWLVowePBgZs6c+dRrBg0axN27d21fly9fjrN6RSRx++mnn2xBxNfXl7Vr1zpkEchHM2cOHID06WHdOtDQOEnMop2zLRYLwcHBJE+e3HbMzc2Ne/fuRXn0Ed1nSBkzZsTd3Z3r169HOX79+nWyZs36xGuyZcuGh4cH7u7utmMvvvgiAQEBhIWF4fmECfdeXl54eXlFqyYRkejatWsX9erV4/79+9SqVYsffvghyt+PsTFqFCxdavaEfP89FCzokGZF4q1o94wYhkHhwoVJly6d7Ss4OJhXXnmFdOnSkTZtWru2wfb09KRMmTL4+fnZjlmtVvz8/KhYseITr6lcuTJnz57FarXajp05c4Zs2bI9MYiIiDhDQEAA9evX5/79+9SpU8ehQWT5cnj01HnmTKhe3SHNisRr0e4ZcfT6IgB9+vShQ4cOlC1blvLlyzN58mRCQkLo1KkTAO3btydHjhyMGTMGgHfffZdp06bx4Ycf8v777/Pnn3/y2Wef8cEHHzi8NhGRp8maNStjx45l/fr1rFq1Cm9vb4e0e+AAdOhgvu7bF7p0cUizIvGe3SuwOtq0adNsi56VKlWKL7/8kgoVKgBQvXp18ubNy7x582zn79u3j969e3P06FFy5MhBly5dGDhwYJRHN8+iqb0iElOPVpp+xGq1RlmVOjb++gvKlTNn0Lzxhrm2SDT/WhOJM876HeryMBLXFEZEJCa2bt3K8OHDWbt2LenTp3do2/fuQbVqcOQIFC8Oe/dCLLexEXEKl68zIiKSVG3ZsoWGDRvy888/2x4bO0pwMNSrZwaRzJlh7VoFEUl6FEZERJ5h8+bNNGzYkIcPH9KgQQM+/fRTh7UdEgL168OePebU3Q0bIE8ehzUvkmAojIiIPMWmTZto1KgRoaGhNGrUiJUrVzpsqYD7982xIT/9BGnSwJYtUKaMQ5oWSXBiHEbOnj3L5s2befDgAWAO7BIRSSw2bNhgCyKNGzdm+fLlDltC4MEDaNgQdu40H8ls3mwOXhVJquwOI7dv38bX15fChQtTr149rl27BkCXLl3o27evwwsUEYlrYWFhfPDBB4SFhdGkSROWLVvmsCDy8KG5zLufH6RMCZs2wT8TCEWSLLvDSO/evUmWLBn+/v5Rlj1u0aIFmzZtcmhxIiKu4OnpycaNG3n33XcdHkSaNIGtW+GFF2DjRqhUySFNiyRodm+7tGXLFjZv3kzOnDmjHC9UqBCXLl1yWGEiInHt9u3bZMiQATD/Tvvqq68c1nZoKDRrZvaEpEhhDlb9Z99RkSTP7p6RkJCQJ24EFRg
YqD1gRCTB+vHHH8mXLx8bNmxweNthYfDmm2YASZ4c1q+H115z+LcRSbDsDiNVq1ZlwYIFtvcWiwWr1cq4cePw8fFxaHEiInHhhx9+oHnz5ty7d4+lS5c6tO3wcGjRwlw/xNvb/K/2mxGJyu7HNOPGjaNmzZocPHiQsLAwBgwYwIkTJwgMDGTv3r3OqFFExGlWr17NW2+9RUREBC1btmTu3LkOazs8HFq2NJd29/KCH36AmjUd1rxIomF3z0ixYsU4c+YMVapUoVGjRoSEhNC0aVOOHDlCgQIFnFGjiIhTrFq1yhZEWrVqxcKFC0mWzO5/oz1RRAS0aQOrVoGnpxlIatd2SNMiiY72phGRJOn777+nRYsWREZG0qZNG+bNm+fQINKuHSxdCh4esHq1udKqSELnrN+hdv8/77fffnvicYvFgre3N7lz59ZAVhGJ99avX09kZCRt27Zl3rx50d75+3kiI6Fjx/8Hke+/VxAReR67w0ipUqVsW2g/6lT595baHh4etGjRgq+//hpvb28HlSki4lizZ8+mYsWKdO7c2aFBpHNnWLwYkiWD5cuhQQOHNC2SqNk9ZmT16tUUKlSIWbNmcezYMY4dO8asWbMoUqQIS5YsYc6cOWzfvp0hQ4Y4o14RkRj7+eefiYyMBMDd3Z1u3bo5LIhYrdC1KyxYAO7uZs9I48YOaVok0bO7Z2T06NFMmTKFOnXq2I4VL16cnDlz8sknn3DgwAFeeOEF+vbty4QJExxarIhITH333Xe0bduW1q1bO/SxDJhjRLp3h3nzzCDy3XfmAmciEj12h5Hjx4+T5wl7XOfJk4fjx48D5qOcR3vWiIi42pIlS2jXrh1WqxVPT88oj5Zj6+FDaN3aHKTq5gYLF5oLnIlI9Nn9mKZo0aJ8/vnnhIWF2Y6Fh4fz+eefU7RoUQCuXLlClixZHFeliEgMLVq0yBZEunbtyuzZs3Fzi/GG5VEEBUG9emYQ8fIyB6u2auWQpkWSFLt7RqZPn07Dhg3JmTMnJUqUAMzeksjISNatWwfA+fPn6dGjh2MrFRGx08KFC+nYsSNWq5Vu3boxc+ZMhwWRmzehbl04dAhSpTIXNNMi1CIxE6N1Ru7du8fixYs5c+YMAEWKFKF169akSpXK4QU6mtYZEUkaFixYQMeOHTEMg+7duzNjxgyHBRF/f3MBs9OnIWNGc/O7MmUc0rRIvBZv1hkBSJUqFe+8847DihARcbSMGTPi4eFB586dmT59usOCyKlTUKsW/PUX5MoFW7dCkSIOaVokyYrxcoMnT57E398/ytgRgIYNG8a6KBGR2KpXrx6//vorxYoVc1gQOXgQXn8dbt+GokVhyxYzkIhI7NgdRs6fP0+TJk04fvw4FovlsYXPHs3hFxGJa4sXL6Z8+fIUKlQIwDauzRG2b4dGjSA4GMqVgw0bzEc0IhJ7dv9z4cMPPyRfvnzcuHGDFClScOLECX766SfKli3Lzp07nVCiiMjzzZ49m7Zt2+Lj48P169cd2vbq1eZg1eBgc9ddPz8FERFHsjuM7Nu3j5EjR5IxY0bc3Nxwc3OjSpUqjBkzhg8++MAZNYqIPNOsWbPo3r07AM2aNSNz5swOa3vuXGjeHMLCoGlTWL/enD0jIo5jdxiJjIy0zZrJmDEjV69eBcxFz06fPu3Y6kREnuPrr7/m7bffBsye28mTJztsUbMJE6BLF3Op9y5dzL1mtA+oiOPZPWakWLFiHDt2jHz58lGhQgXGjRuHp6cns2bNIn/+/M6oUUTkiWbMmGFb06h379588cUXDgkihgGDBsHYseb7AQPg88/BgQu3isi/2B1GhgwZQkhICAAjR47kjTfeoGrVqmTIkIFly5Y5vEARkSdZtmyZLYj07duX8ePHOySIREbCO+/AN9+Y78eONcOIiDhPjBY9+6/AwEDSpUvn0P0enEWLnokkDoGBgfj6+lKzZk3GjRvnkL9/QkOhTRtzWXc3N5g1y3w8IyKmeLHoWXh4OMmTJ+fo0aMUK1bMdjx9+vQOK0hEJDrSp0/P7t27SZEihUOCyL175gDVbdvA0xOWLoUmTRxQqIg8l11hxMPDg9y5c2stERFxiS+//BJ3d3d69uwJwAsvvOCQdgMCoH59OHwYUqY095mpUcMhTYtINNg9m2bw4MF8/PHHBAYGOqMeEZEnmjRpEh9++CHvvfcev/zyi8Pa/fNPqFTJDCKZMsGOHQoiInHN7gGs06ZN4+zZs2TPnp08efI89i+Tw4cPO6w4ERGAiRMn0rdvX8D8B1GFChUc0u6BA2aPyK1bUKAAbN5s/ldE4pbdYaRx48ZOKENE5Mm++OIL+vXrB8Ann3zCiBEjHDJGZP16eOstuH8fypY13ztwrTQRsYNDZtMkJJpNI5JwjB8/ngH/zKsdNmwYw4cPd0i7c+dC9+7mNN7XX4cVK8yxIiLybM76HRqjrSzv3LnDN998w6BBg2xjRw4fPsyVK1ccVpiIJG379u2zBZHhw4c7JIgYBnz6qTldNzISOnSAH39UEBFxNbsf0/z222/4+vqSJk0aLl68SLdu3UifPj2rVq3C39+fBQsWOKNOEUliKlasyPDhw7FYLAwdOjTW7UVGwnvvwcyZ5vuPPzaDSQJYHkkk0bP7MY2vry+lS5dm3LhxpEqVimPHjpE/f35+/vlnWrduzcWLF51UqmPoMY1I/BYeHo6Hh4dD23zwAFq3hjVrzPAxdSr8MztYROwQbx7T/Prrr7ZNqf4tR44cBAQEOKQoEUmaPv30U3x9fQkODnZYm4GB4OtrBhEvL1i5UkFEJL6xO4x4eXkRFBT02PEzZ86QKVMmhxQlIknPqFGj+OSTT/jpp59Ys2aNQ9q8dAkqV4aff4a0aWHrVnOVVRGJX+wOIw0bNmTkyJGEh4cDYLFY8Pf3Z+DAgTRr1szhBYpI4jdixAjbuJAxY8bQtm3bWLf522/mYmanTkHOnLBnD1StGutmRcQJ7A4jX3zxBcHBwWTOnJkHDx5QrVo1ChYsSKpUqRg9erQzahSRROzfM2XGjh3LRx99FOs2d+wwg8fVq1CsGOzbBy+/HOtmRcRJ7J5NkyZNGrZu3cqePXv47bffCA4OpnTp0vj6+jqjPhFJpAzDYPjw4YwcORIw1xR5tLhZbCxbBu3bQ1gYvPaauc9M2rSxblZEnMju2TSXL18mV65czqrH6TSbRiR+CAgIoFixYty+fZsJEybYlnuPjalT4YMPzNfNmsGiReDtHetmReQf8WY2Td68ealWrRqzZ8/m77//dlghIpK0ZM2aFT8/P6ZPn+6QIPLFF/8PIu+9Z/aQKIiIJAx2h5GDBw9Svnx5Ro4cSbZs2WjcuDErV64kNDTUGfWJSCJiGAYXLlywvS9ZsiQ9evSIdbtjx8KjJzxDhsCXX4K7e6ybFZE4YncYeeWVVxg/fjz+/v5s3LiRTJky0b17d7JkyULnzp2dUaOIJAKGYTBo0CCKFy/Onj17HNbup5/CozGvI0bAqFFaVVUkoYnR3jRgTun18fFh9uzZbNu2jXz58jF//nxH1iYiiY
RhGAwcOJCxY8cSEhLC8ePHHdAmDB8On3xivv/0U3DAqvEi4gIxDiN//fUX48aNo1SpUpQvX56UKVMyffp0R9YmIomAYRgMGDCA8ePHAzBt2jTefffdWLZpBo8RI8z3Y8fC4MGxrVREXMXuqb1ff/01S5YsYe/evRQtWpQ2bdrwww8/kCdPHmfUJyIJmGEY9OvXj4kTJwLw1VdfOSSIDBpkBhAwB6726RPbSkXElewOI59++imtWrXiyy+/pGTJks6oSUQSAcMw6NOnD5MnTwZgxowZvPPOO7FsE/r3NwMIwJQp/59BIyIJl91hxN/fH4tGh4nIc0RERHDu3DnA7FHt3r17rNozDOjd2wwgANOngwMm4ohIPGB3GLFYLNy5c4c5c+bwxx9/APDSSy/RpUsX0qRJ4/ACRSRh8vDwYMWKFWzfvp26devGqi2r1ewBeTQs7euvIZbZRkTikRitM1KgQAEmTZpEYGAggYGBTJo0iQIFCnD48GFn1CgiCYRhGHz//fc8WtjZy8vLIUGkRw8ziFgsMGeOgohIYmN3GOnduzcNGzbk4sWLrFq1ilWrVnHhwgXeeOMNevXq5YQSRSQhMAyD9957j+bNm9PHQSNKrVYzeHz9tRlEvv0WtJyRSOJj92OagwcPMnv2bJIl+/+lyZIlY8CAAZQtW9ahxYlIwmC1WnnvvfeYMWMGFouFEiVKxLrNyEjo0gXmzwc3N1iwANq0cUCxIhLv2N0zkjp1avz9/R87fvnyZVKlSuWQokQk4bBarfTo0cMWRObOnUunTp1i1WZEBHToYAYRd3dYskRBRCQxszuMtGjRgi5durBs2TIuX77M5cuXWbp0KV27dqVVq1bOqFFE4imr1cq7777L119/jcViYd68eXTs2DFWbUZEQLt2sHgxJEsGS5dCixaOqVdE4ie7H9NMmDABi8VC+/btiYiIAMxR8++++y6ff/65wwsUkfirZ8+ezJo1C4vFwvz582nXrl2s2nvwANq3h5UrwcMDli+Hxo0dU6uIxF8W49Gwdzvdv3/ftoZAgQIFSJEihUMLc5agoCDSpEnD3bt3SZ06tavLEUnQlixZQseOHZk7dy5t27aNVVvHj0OrVnDiBHh6moGkQQMHFSoiDuGs36HRDiORkZGcOHGCQoUKkTx58iifPXjwgD///JNixYrh5hbj7W7ihMKIiGP5+/uTO3fuGF9vGDB1KgwYAKGhkCWL+YimZk0HFikiDuGs36HRTg4LFy6kc+fOeHp6PvaZh4cHnTt3ZsmSJQ4rTETin8jISIYMGcKVK1dsx2ITRK5fh/r14cMPzSBSvz789puCiEhSE+0wMmfOHPr164e7u/tjnz2a2jtr1iyHFici8UdkZCRdunRh9OjR1K5dm/Dw8Fi1t2EDlCgBGzeClxdMmwZr10LmzA4qWEQSjGgPYD19+jSvvvrqUz8vV66cbXl4EUlcIiMj6dy5MwsWLMDd3Z3hw4fj4eERo7YePoSBA+HLL833xYrBd9+Z/xWRpCnaYSQkJISgoKCnfn7v3j3u37/vkKJEJP6IjIykY8eOLFq0CHd3d5YuXUrz5s1j1Nbvv0Pr1uZgVTD3mxk7Fry9HViwiCQ40X5MU6hQIX7++eenfr5nzx4KFSrkkKJEJH6IjIykQ4cOLFq0iGTJkrFs2bIYBRHDMB/DlC1rBpHMmc3HNFOmKIiIiB1hpHXr1gwZMoTffvvtsc+OHTvG0KFDad26tUOLExHXGjx4MIsXL7YFkWbNmtndxo0b0LAhvP++OUi1bl1zkGos988TkUQk2lN7w8PDqV27Nnv27MHX15eiRYsCcOrUKbZt20blypXZunVrjJ8jxxVN7RWJvqtXr1K7dm1GjRpFkyZN7L5+82ZzWffr181BquPHw3vvmZveiUjC4/J1RsAMJJMmTWLJkiX8+eefGIZB4cKFad26Nb169XritN/4RmFE5NkMw8Dyr7QQERERZWPM6Hj4EAYNgsmTzfcvv2wOUi1e3IGFikicixdhJDFQGBF5uvDwcNq3b0+DBg1i/Nj12DFzSfdHT3Tfew/GjYP/rJUoIgmQyxc9E5HELTw8nFatWtk2vgwICLDr+osXzRDyyitmEMmUCdatM1dXVRARkWdRGBERwsPDadmyJd9//z2enp6sWLGCrFmzRuvaW7egd28oUgQWLjRnzrz1lhlI6td3cuEikijYvWuviCQuYWFhtGzZktWrV+Pl5cXq1aupG42pLiEhMGmS+Qjm3j3zWI0a5rohZcs6uWgRSVQURkSSsLCwMN566y1++OEHvLy8WLNmDa+//vozrwkPh2++gREjzFkyYD6a+fxzqFVLM2VExH4KIyJJ2KJFi2xB5IcffqBOnTpPPddqhRUrYMgQOHvWPJY/P4webT6WiecbdotIPBatMNKnT59oNzhx4sQYFyMicatTp0788ccf1KpVi9q1az/1vG3b4KOP4NAh833mzDB0KHTrBglgRr+IxHPRCiNHjhyJ8v7w4cNERERQpEgRAM6cOYO7uztlypRxfIUi4lChoaEAeHl5YbFYGD9+/FPPPXzYDCFbt5rvU6aE/v2hTx/ztYiII0QrjOzYscP2euLEiaRKlYr58+eTLl06AP7++286depE1apVnVOliDjEw4cPadasGW5ubqxcuRIvL68nnnf2rPk4Ztky872HB7z7LgwebPaKiIg4kt2LnuXIkYMtW7bw8ssvRzn++++/U7t2ba5everQAh1Ni55JUvXw4UOaNGnCpk2bSJ48OXv27KF06dJRzvnjD3Mg6uLFEBlpDkZt0wZGjoR8+VxUuIjEG876HWr3ANagoCBu3rz52PGbN29y79H8PhGJVx4+fEjjxo3ZvHkzyZMnZ/369VGCyMGDMGYMrF5trhMC5kZ2Y8ZAyZIuKlpEkgy7x783adKETp06sWrVKv766y/++usvvv/+e7p06ULTpk2dUaOIxMKDBw9o1KgRmzdvJkWKFGzYsAEfHx8MA3btgjp1oFw5WLXKDCJNmsCBA7Bhg4KIiMQNu8PIzJkzqVu3Lq1btyZPnjzkyZOH1q1b8/rrr/PVV1/FqIjp06eTN29evL29qVChAgcOHIjWdUuXLsVisdC4ceMYfV+RxO5RENmyZYstiFSrVp3166FKFaheHbZsAXd3aNcOfv/dDCXlyrm6chFJSmK8UV5ISAjnzp0DoECBArzwwgsxKmDZsmW0b9+emTNnUqFCBSZPnsyKFSs4ffo0mZ8xUu7ixYtUqVKF/Pnzkz59etasWROt76cxI5KUHD16lCpVqgCwdu0Gbtx4jTFjzM3sALy8oHNnc4aMxoSIyPPEu117z549y7lz53jttddInjz5Y9uOR1eFChUoV64c06ZNA8BqtZIrVy7ef/99PvrooydeExkZyWuvvUbnzp3ZvXs3d+7cURgReYodO/awaZPB6tVV+fNP81jKlObsmN69IVs219YnIglHvBnAevv2bd566y127NiBxWLhzz//JH/+/HTp0oV06dLxxRdfRLutsLAwDh06xKBBg2zH3Nzc8PX1Zd++f
U+9buTIkWTOnJkuXbqwe/fuZ36P0NBQ27oKYN5IkcTs/v37XLp0idy5X+Sbb2DChCr89Zf5Wfr08OGH8N575msRkfjA7jEjvXv3xsPDA39/f1KkSGE73qJFCzZt2mRXW7du3SIyMpIsWbJEOZ4lS5anbl++Z88e5syZw+zZs6P1PcaMGUOaNGlsX7ly5bKrRpGEJCQkhDfeeINy5aqQM+cxevWCv/4yez+++AIuXTJXTlUQEZH4xO6ekS1btrB582Zy5swZ5XihQoW4dOmSwwp7knv37tGuXTtmz55NxowZo3XNoEGDoixnHxQUpEAiiVJISAh16tRn795dQCrgPvnzw8CB0KGDOT5ERCQ+sjuMhISEROkReSQwMPCpqzk+TcaMGXF3d+f6o60//3H9+nWyZs362Pnnzp3j4sWLNGjQwHbMarUCkCxZMk6fPk2BAgWiXOPl5WV3XSIJTXBwMK+9Vp8jR34CUuPuvplx417lgw8gmbbDFJF4zu7HNFWrVmXBggW29xaLBavVyrhx4/Dx8bGrLU9PT8qUKYOfn5/tmNVqxc/Pj4oVKz52ftGiRTl+/DhHjx61fTVs2BAfHx+OHj2qHg9JkoKC7lGyZF1bEMmWbQv79r1Knz4KIiKSMNj9V9W4ceOoWbMmBw8eJCwsjAEDBnDixAkCAwPZu3ev3QX06dOHDh06ULZsWcqXL8/kyZMJCQmhU6dOALRv354cOXIwZswYvL29KVasWJTr06ZNC/DYcZGk4NKle5QuXZfAwL1AGnx8trBqVXn++b+FiEiCYHcYKVasGGfOnGHatGmkSpWK4OBgmjZtSs+ePckWgzmCLVq04ObNmwwdOpSAgABKlSrFpk2bbINa/f39cXOzuwNHJNH75Rd48003AgPdgTT077+VsWPLEYMZ9iIiLmX3OiP+/v7kypXriWuK+Pv7kzt3bocV5wxaZ0QSOqsVJk6EQYMgIgLy5QtmzJgLtGhR3NWliUgi56zfoXZ3OeTLl++JG+Xdvn2bfFrCUcSpbt2CunXv0r//HCIioEULOHo0pYKIiCRodj+medpKq8HBwXh7ezukKBF53J490KLFXa5erQPsp2nTQL77rr8ey4hIghftMPJorQ6LxcInn3wSZXpvZGQk+/fvp1SpUg4vUCSps1ph7FgYMuQOVmsd4ABp0qRnyBBfBRERSRSiHUaOHDkCmD0jx48fx9PT0/aZp6cnJUuWpF+/fo6vUCQJu3HD3E13y5Y7QG3gV9KlS8/27X4K/yKSaEQ7jOzYsQOATp06MWXKFA3+FHGynTuhdWu4du1vLJbaGMZBMmTIgJ+fHyVLlnR1eSIiDmP3ANbJkycTERHx2PHAwEBtQifiIHPmQM2acO1aON7eZhDJmDEj27dvVxARkUTH7jDSsmVLli5d+tjx5cuX07JlS4cUJZKUjRsHXbuaY0XatfNg9OiOZMqUie3bt1OiRAlXlyci4nB2rzOSPn169u7dy4svvhjl+KlTp6hcuTK3b992aIGOpnVGJL4yDHNTu/HjzfcDB8KYMWCxwJ07d2yrDYuIuEq8WWckNDT0iY9pwsPDefDggUOKEklqIiKgW7dHQeQ2pUu3p3//27bZMgoiIpKY2R1Gypcvz6xZsx47PnPmTMqUKeOQokSSkocPzcXL5swBi+UWuXLV5PDhhbRr187VpYmIxAm7Fz379NNP8fX15dixY9SsWRMAPz8/fv31V7Zs2eLwAkUSs3v3oHFj2L4dPDxukT17TS5d+o2sWbMyceJEV5cnIhIn7O4ZqVy5Mvv27SNnzpwsX76ctWvXUrBgQX777TeqVq3qjBpFEqVbt6BGDTOIvPDCTXLmrGELIjt27KBo0aKuLlFEJE7YPYA1odMAVokPLl+G2rXh1ClIl+4G6dPX5Ny538mWLRs7duygSJEiri5RROQx8WYAK8C5c+cYMmQIrVu35saNGwBs3LiREydOOKwwkcTq9GmoXNkMIjlzQrFiHTh37neyZ8/Ozp07FUREJMmxO4zs2rWL4sWLs3//fr7//nuCg4MBOHbsGMOGDXN4gSKJyaFDUKWK2TNSpAjs3Qtz506lQoUK7Ny5k8KFC7u6RBGROGd3GPnoo4/49NNP2bp1a5T9aWrUqMEvv/zi0OJEEpOdO8HHxxwrUqZMJLt3Q+7cULBgQfbt20ehQoVcXaKIiEvYHUaOHz9OkyZNHjueOXNmbt265ZCiRBKbNWvg9dfN2TOVKl0jLKwMv/66wfa5RdvvikgSZncYSZs2LdeuXXvs+JEjR8iRI4dDihJJTObNg2bNIDQU6tS5ys2b1Tl+/Bi9evUiPDzc1eWJiLhcjPamGThwIAEBAVgsFqxWK3v37qVfv360b9/eGTWKJFgTJ0KnTuY+My1aXOH8+er8+ecZcufOzaZNm/Dw8HB1iSIiLmd3GPnss88oWrQouXLlIjg4mJdeeonXXnuNSpUqMWTIEGfUKJLgGAYMGgR9+5rv3377CocP+/Dnn3+SJ08edu7cSf78+V1bpIhIPBHjdUb8/f35/fffCQ4O5pVXXkkwg++0zog4W3i4uevuggXm+0GD/mLFCh/Onj1rCyJ58+Z1aY0iIjHhrN+hdi8H/0ju3LnJlSsXoMF3Io8EB0Pz5rB5M7i7w+zZcObMdM6ePUvevHnZsWOHgoiIyH/EaNGzOXPmUKxYMby9vfH29qZYsWJ88803jq5NJEG5fh2qVzeDSIoU8OOP5niRTz/9lP79+6tHRETkKezuGRk6dCgTJ07k/fffp2LFigDs27eP3r174+/vz8iRIx1epEh8d/asOXX33DnImBEWLLhO7doZAXfc3d0ZN26cq0sUEYm37B4zkilTJr788ktatWoV5fh3333H+++/H+/XGtGYEXG0X3+F+vXh5k3Inx/mzr1Ep04+VK5cmXnz5uHu7u7qEkVEHCLejBkJDw+nbNmyjx0vU6YMERERDilKJKHYuNEcI3L/PpQuDTNnXuStt3y4ePEibm5u3L59m8yZM7u6TBGReM3uMSPt2rVjxowZjx2fNWsWbdq0cUhRIgnBvHnQoIEZRGrXhnnzLvDmm9W5ePEihQoVYteuXQoiIiLREKPZNHPmzGHLli28+uqrAOzfvx9/f3/at29Pnz59bOdNnDjRMVWKxCOGAWPGwODB5vt27eDjj89Tp44P/v7+FCpUiB07dmhFYhGRaLI7jPz++++ULl0agHPnzgGQMWNGMmbMyO+//247T9N9JTGKjIQPPoCvvjLfDxwI3bqdx8enOpcvX6Zw4cLs2LGD7Nmzu7JMEZEExe4wsmPHDmfUIRLvPXgAbdvCqlVgscCUKfD++7B16zmuX79OkSJF2LFjB9myZXN1qSIiCYrdY0Zu3rz51M+OHz8eq2JE4qu//zbHhaxaBZ6esHy5GUQAatWqxfr16xVERERiyO4wUrx4cdavX//Y8QkTJlC+fHmHFCUSn/j7Q5UqsGcPpEkDW7ZAyZJ/8ueff9rO8fX1VRAREYkhu8NInz59aNasGe+++y4PHjzgypUr1KxZk3HjxrFkyRJn
1CjiMsePQ6VKcPIk5MgBu3dD9ux/Ur16dXx8zP1mREQkduwOIwMGDGDfvn3s3r2bEiVKUKJECby8vPjtt99o0qSJM2oUcYlNm6BqVbhyBV56CfbtA0/P01SrVo2rV6+SNm1aLZwnIuIAMdqbpmDBghQrVoyLFy8SFBREixYtyJo1q6NrE3EJw4DPP4d69eDuXfMRze7dcP/+aXx8fLh27RrFihVj+/btWkdERMQB7A4je/fupUSJEvz555/89ttvzJgxg/fff58WLVrw999/O6NGkTgTEgItW8KgQWYo6dYNtm2DGzdOUb16da5du0bx4sUVREREHMjuMFKjRg1atGjBL7/8wosvvkjXrl05cuQI/v7+FC9e3Bk1isSJCxfM8SHLl0OyZDBzJsyaBRcvnqZ69eoEBARQokQJtm/fTqZMmVxdrohIomH3OiNbtmyhWrVqUY4VKFCAvXv3Mnr0aIcVJhKX/PzgrbcgMBCyZIGVK83HM2BuDpk9e3ayZs3Ktm3byJgxo2uLFRFJZOzetTeh06698m+GAZMnQ79+YLVCuXLmWiI5c0Y9LzAwEMMwyJAhg0vqFBGJD5z1OzTaj2nq1avH3bt3be8///xz7ty5Y3t/+/ZtXnrpJYcVJuJsDx5A+/bQp48ZRDp0gJ9+MoPI77//zuzZs23npk+fXkFERMRJoh1GNm/eTGhoqO39Z599RmBgoO19REQEp0+fdmx1Ik7yaCGzRYvA3R2+/BK+/Ra8vc2VhH18fOjevTtLly51dakiIoletMeM/PdpThJ7uiOJyK5d8OabcPMmZMwIK1ZA9ermZ7/99hs1a9bk1q1blClThtq1a7u0VhGRpCBG64yIJESGAdOmga+vGUReeQUOHvx/EDl27Bg1atTg1q1blC1blq1bt5I+fXqX1iwikhREO4xYLBYsFstjx0QSgocPoWtXc3O7iAho3drcayZPHvPzo0ePUqNGDW7fvk25cuXYunUr6dKlc23RIiJJhF2PaTp27IiXlxcADx8+5J133uGFF14AiDKeRCQ+uXIFmjWD/fvBzQ3GjTMHrT7K0tevX6dmzZoEBgZSvnx5tmzZQpo0aVxbtIhIEhLtMNKhQ4co79u2bfvYOe3bt499RSIOtGePOT4kIADSpYNly6BWrajnZMmShV69erF+/Xo2b96sICIiEse0zogkShER8OmnMGqUOW23eHFYswby53/6NaGhobaePxEReZzL1xkRSSguXIBq1WDECDOItGsHP/8cNYj8+uuvNGrUiODgYNsxBREREddQGJFEZckSKFXKDB+pU5vvFyyAlCn/f86BAweoVasWP/74I0OHDnVZrSIiYrJ7bxqR+CgoCHr2NBcxA6hc2XydN2/U8/bv30/t2rUJCgqiSpUqjBgxIs5rFRGRqNQzIgnevn1mb8iiReZsmeHDYefOx4PIL7/8YgsiVatWZePGjaRKlSruCxYRkSjUMyIJVkQEfPYZjBwJkZFm+Fi8GCpVevzcffv2UadOHe7du8drr73G+vXrSfnvZzciIuIyCiOSIF28CG3bwt695vs2bWD6dHjSrNzw8HDatm3LvXv3qF69OuvWrbOtjyMiIq6nxzSS4Hz3HZQsaQaRVKnMxzOLFj05iAB4eHiwatUqmjdvriAiIhIPaZ0RSTCCguC992DhQvN9xYpmCHna2iEPHjwgefLkcVegiEgip3VGJEn75RdzY7uFC81BqkOHwk8/PT2I7N69m/z587Nnz564LVREROymMCLxmmHA6NFQpQqcP29ubLdrl7mgWbKnjHj66aefqFu3LgEBAUycODFuCxYREbspjEi8NngwDBlizpZp1QqOHjWDydPs2rWLunXrEhISQu3atVm8eHGc1SoiIjGjMCLx1owZMGaM+Xr6dHPabtq0Tz9/586d1KtXj/v371OnTh3WrFmjMSMiIgmAwojESz/+aA5WBXMdkR49wGJ5+vnbt2+3BZHXX39dQUREJAFRGJF4Z/9+aNnS3OSua1fzMc3zzJo1iwcPHlCvXj1Wr16Nt7e38wsVERGH0KJnEq+cPQtvvAEPHkC9euajmmf1iDwyf/58SpQoQd++fbX7rohIAqOeEYk3bt6E11+HW7egTBlYtuzpM2YA/vjjDx4tk+Pl5cXHH3+sICIikgApjEi8cP++2SNy7hzkywfr18Ozto7ZvHkzr7zyCh9++CFJbN0+EZFER2FEXC4iwhwjcuAApE8PGzdClixPP3/Tpk00atSI0NBQ/P39iYiIiLtiRUTE4RRGxKUMAz74ANauBW9v879Fijz9/I0bN9K4cWNCQ0Np3Lgxy5cvx8PDI+4KFhERh1MYEZcaO/b/g1QXL4ZKlZ5+7oYNG2xBpEmTJixfvhxPT8+4K1ZERJxCYURcZtEiGDTIfD1lCjRt+vRz161bR5MmTQgLC6NZs2YsW7ZMPSIiIomEwoi4hJ8fdO5svu7XD95//9nnBwcHExERQfPmzfnuu+8UREREEhGtMyJx7rffzF6Q8HBo0cJ8VPM8LVu2JHv27FSsWFFBREQkkVHPiMSpy5fNxcyCgqBaNZg/H9ye8qdw48aNXLlyxfb+tddeUxAREUmEFEYkzty5A3XrwpUr8NJLsHo1PG2NstWrV9OwYUN8fHy4efNmnNYpIiJxS2FE4kRoKDRpAidOQLZs5loi6dI9+dxVq1bx1ltvERERQdmyZUn3tBNFRCRRUBgRp7NaoVMn2LkTUqWCDRsgd+4nn/v999/bgkjr1q1ZsGAByZ61JryIiCR4CiPiVFYr9O4N331n7jPz/fdQqtSTz12xYgUtWrQgMjKStm3bKoiIiCQR+ptenCYsDDp2NIMIwDffQK1aTz537dq1tGrVisjISNq1a8e3336Lu7t7nNUqIiKuozAiThEUZE7f9fMze0S+/Rbatn36+WXKlKFAgQJUrFiROXPmKIiIiCQhCiPicNeumbNmjh0zd979/nuoXfvZ12TPnp29e/eSLl06BRERkSRGYUQc6vRpqFMHLl2CzJnNWTOlSz/53CVLlmAYBm3atAEgY8aMcVipiIjEFwoj4jD79sEbb0BgIBQqBJs2Qf78Tz538eLFtG/fHoACBQrw6quvxmGlIiISn2g2jTjEjz9CzZpmEClfHvbufXoQWbhwIe3bt8dqtdKlSxfKly8ft8WKiEi8ojAisTZ7trmg2YMH5lLv27dDpkxPPnf+/Pl06NABq9XK22+/zcyZM3F72nrwIiKSJMSL3wLTp08nb968eHt7U6FCBQ4cOPDUc2fPnk3VqlVJly4d6dKlw9fX95nni/MYBgwfDt27m+uJdO4MP/wAL7zw5PPnzZtHp06dMAyDd955h6+++kpBREREXB9Gli1bRp8+fRg2bBiHDx+mZMmS1KlThxs3bjzx/J07d9KqVSt27NjBvn37yJUrF7Vr146yoZo4X0SEGUJGjDDff/KJuY7I09Yo279/P507d8YwDN59912mT5+uICIiIgBYDMMwXFlAhQoVKFeuHNOmTQPAarWSK1cu3n//fT766KPnXh8ZGUm6dOm
YNm2abUDkv4WGhhIaGmp7HxQURK5cubh79y6pU6d23A+ShNy/Dy1awLp15o67X30Fb7/97GsMw6B3796Eh4czbdo0LBZL3BQrIiIOExQURJo0aRz+O9Sls2nCwsI4dOgQgwYNsh1zc3PD19eXffv2RauN+/fvEx4eTvr06Z/4+ZgxYxjx6J/vEmu3bpkzZvbvB29vc3XVxo2ffr5hGFgsFiwWC5MmTQJQEBERkShc2k9+69YtIiMjyZIlS5TjWbJkISAgIFptDBw4kOzZs+Pr6/vEzwcNGsTdu3dtX5cvX4513UnVhQtQubIZRNKlg23bnh1EZs2aRaNGjWw9U49CiYiIyL8l6HVGPv/8c5YuXcrOnTvx9vZ+4jleXl54eXnFcWWJz5Ej5kyZgABzx91Nm+DFF59+/tdff80777wDwKJFi+jSpUscVSoiIgmNS3tGMmbMiLu7O9evX49y/Pr162TNmvWZ106YMIHPP/+cLVu2UKJECWeWmeRt3gzVqplBpEQJc3GzZwWRGTNm2IJI79696dy5cxxVKiIiCZFLw4inpydlypTBz8/PdsxqteLn50fFihWfet24ceMYNWoUmzZtomzZsnFRapI1axbUrw/37oGPD/z0E2TP/vTzv/rqK3r06AFA3759+eKLL/RoRkREnsnlcyv79OnD7NmzmT9/Pn/88QfvvvsuISEhdOrUCYD27dtHGeA6duxYPvnkE+bOnUvevHkJCAggICCA4OBgV/0IiZLVCh99ZM6SiYyEdu3MRzNp0jz9munTp9OzZ08A+vfvz/jx4xVERETkuVw+ZqRFixbcvHmToUOHEhAQQKlSpdi0aZNtUKu/v3+U9ShmzJhBWFgYzZs3j9LOsGHDGD58eFyWnmg9fAgdOsDy5eb74cNh6FB4Vq64ceOGLTQOGDCAzz//XEFERESixeXrjMQ1Z82RTixu3jRnyPz8M3h4mAuZPWH5lifas2cP27ZtY9iwYQoiIiKJkLN+hyqMiM2ZM+aMmXPnIG1aWL0aqld/9jW3bt0iY8aMcVGeiIi4mLN+h7p8zIjED7t3Q8WKZhDJm9fsGXleEJk4cSJFixbl6NGjcVChiIgkVgojwpIl4OsLgYFQvjz88suzp+6CObW6b9++3L59m02bNsVNoSIikigpjCRhhgGjR0ObNhAWBk2bwo4d8J8FcR8zfvx4+vfvD5gDh6Ozh5CIiMjTKIwkUeHh0LUrDBlivu/bF1asgBQpnn3d2LFjGTBgAADDhw/XDCYREYk1l0/tlbh39y40b27uLePmBlOnwj/rlD3TmDFj+PjjjwEYMWIEQ4cOdXKlIiKSFCiMJDGXLpkrqp44AS+8AMuWme+fJzw8nM2bNwMwatQohjzqUhEREYklhZEk5NAheOMNc4+ZbNlg/Xp45ZXoXevh4cG6detYvXo17dq1c26hIiKSpGjMSBKxYQO89poZRIoXh/37oxdEdu3aZXudMmVKBREREXE4hZEkYPNmaNIE7t+HOnVgzx7Ilev5140YMYLq1aszcuRI5xcpIiJJlsJIIrdrl7m8e1iYOWh17Vp43qJ5hmFE2evH29vb6XWKiEjSpTEjidgvv5hjRB4+NAepLl5s7jfzLI+CyKhRowAYN26cbU0RERERZ1AYSaSOHIHXX4fgYHN11ZUrwdPz2dcYhsHQoUP59NNPgf+vsioiIuJMCiOJ0MmTULu2uZ5IlSqwZg1E50nLJ598wujRowH44osv6NOnj3MLFRERQWEk0fnzT6hZE27dgrJlzem7L7wQvWuzZ88OwKRJk+jVq5fzihQREfkXi2EYhquLiEvO2v44Prh0CapWhcuXoUQJc5+Z9Onta+O3336jRIkSzilQREQSNGf9DtVsmkTi6lWzR+TyZShSBLZufX4QMQyDL7/8ktu3b9uOKYiIiEhcUxhJBG7eNAepnjsH+fKBnx9kzvzsawzDYMCAAXz44YfUqlWLsLCwuClWRETkPzRmJIH7+2+oVQv++ANy5jSDSI4cz77GMAz69+/PF198AUDXrl3xfN5UGxERESdRGEnAgoLM6bvHjkGWLGYQyZfv2dcYhkHfvn2ZNGkSADNmzOCdd96Jg2pFRESeTGEkgbp/31zQ7MAByJABtm2DwoWffY1hGPTu3ZspU6YAMHPmTN5+++04qFZEROTpFEYSoIcPzSXed+82l3bfvBmKFXv+daNGjbIFkVmzZtGtWzfnFioiIhINGsCawISHw1tvmbNlXngBNm6EMmWid23r1q3JlSsXs2fPVhAREZF4Qz0jCUhkJLRta2525+0NP/4IlSpF//qCBQvyxx9/8EJ0V0ETERGJA+oZSSCsVujaFZYvNze7W7UKatR43jVWevXqxbp162zHFERERCS+URhJID76CObNA3d3WLoU6tZ99vlWq5WePXsyZcoU3nzzTa5duxYndYqIiNhLj2kSgKlTYfx48/XcudC06bPPt1qtvPvuu8yaNQuLxcKsWbPIli2b8wsVERGJAYWReG71avjwQ/P16NHQvv2zz7darbzzzjvMnj0bNzc35s+fT9u2bZ1fqIiISAwpjMRjP/8MrVuDYcDbb8OgQc8+32q10r17d+bMmYObmxsLFiygTZs2cVOsiIhIDCmMxFOnT0ODBuaaIm+8AdOmgcXy7Gvmz59vCyILFy6kdevWcVOsiIhILCiMxEPXr5sDVAMDoVw5c8Bqsmj8L9W+fXt2795NrVq1aNWqlfMLFRERcQCFkXgmOBjq14cLFyB/fli3zlzc7GkiIyMBcHd3x93dnblz58ZRpSIiIo6hqb3xSEQEtGgBhw6Z+81s2gSZMz/9/MjISDp16kTHjh1toURERCShURiJJwwDevSADRvM1VXXrYNChZ5+fmRkJB06dGDhwoV89913/Prrr3FXrIiIiAMpjMQTo0fD7Nng5maOEXn11aefGxERQfv27Vm8eDHJkiVj2bJlvPqsC0REROIxjRmJB+bNg08+MV9/+SU0avT0cyMiImjXrh1Lly4lWbJkLF++nCZNmsRJnSIiIs6gMOJiW7bAow10Bw6Enj2ffm5ERARt27Zl2bJlJEuWjBUrVtC4ceM4qVNERMRZ9JjGhY4ehWbNzIGrrVvDZ589+/xjx46xevVqPDw8WLlypYKIiIgkCuoZcZFLl6BePXMqr4+PueeM23OiYZkyZVizZg0RERE0aNAgbgoVERFxMoURF/j7b3NRs2vXoFgxWLUKvLyefG54eDgBAQHkypULgLrP265XREQkgdFjmjj28CE0bgx//AE5cphTedOmffK54eHhtGzZkooVK3L27Nm4LFNERCTOKIzEIasVOnSAn36C1Klh40b4p8PjMWFhYbRo0YJVq1Zx8+ZNzp07F7fFioiIxBE9polDQ4bA8uXg4QGrV0Px4k8+LywsjLfeeosffvgBLy8v1qxZQ506deK2WBERkTiiMBJHduyAzz83X8+dCzVqPPm80NBQ3nzzTdauXYuXlxc//PCDgoiIiCRqCiNx4M4d8/GMYZhrirRt++TzQkNDad68OevWrcPb25
sffviB2rVrx2mtIiIicU1hJA707AmXL0PBgjBx4tPPe/DgAVevXsXb25sff/yRWrVqxV2RIiIiLqIw4mRLl8KSJeDuDgsXQsqUTz83bdq0bN26lZMnT1KlSpW4K1JERMSFNJvGif76C95913w9ePCTN797+PAhP/zwg+19+vTpFURERCRJURhxEqsVOnY0x4uUK2fOpPmvhw8f0qRJExo3bsyMGTPiukQREZF4QY9pnOTLL8HPD1KkgEWLzOm8//bgwQMaN27Mli1bSJEiBS+++KJrChUREXExhREn+P13+Ogj8/UXX0DhwlE/f/DgAY0aNWLr1q2kSJGCDRs2UK1atbgvVEREJB7QYxoHCw01p+6GhkL9+vD221E/v3//Pg0bNmTr1q288MILbNy4UUFERESSNPWMONjQoXDsGGTMCN98AxbL/z+LiIigYcOG+Pn52YJI1apVXVesiIhIPKCeEQfatQvGjzdff/MNZM0a9fNkyZJRrVo1UqZMyaZNmxREREREAIthGIari4hLQUFBpEmThrt375I6dWqHtXv3LpQoAf7+0KWLGUae5vLly+R62g55IiIi8ZSzfoeqZ8RB3n/fDCL588OkSf8/HhISQt++fQkODrYdUxARERH5P40ZcYAVK8zVVd3czGm8qVKZx4ODg6lfvz4//fQTZ86cYe3ata4tVEREJB5SGImlK1f+P2Pm44+hYkXzdXBwMPXq1WP37t2kTp2aIU9a9UxERET0mCY2rFbo1An+/hvKlDFn0gDcu3ePunXrsnv3btKkScPWrVupUKGCa4sVERGJp9QzEgvTpsHWrZA8+f9XWX0URPbu3WsLIuXKlXN1qSIiIvGWekZi6ORJGDjQfD1+PBQtar5u27Yte/fuJW3atGzbtk1BRERE5DkURmIgLAzatIGHD+H116FHj/9/NmrUKAoVKsS2bdsoW7as64oUERFJIPSYJgaGDYOjRyFDBpg7F8AAzKVWS5QowcmTJ0mWTLdWREQkOtQzYqfdu2HsWPP1rFmQPPkdfH192bNnj+0cBREREZHo029NO9y9C+3bg2FAx45Qo8Ydateuza+//sr58+c5ffo0np6eri5TREQkQVEYiSar1dyN9+JFyJsXhg//m1q1anPw4EEyZMjAmjVrFERERERiQGEkmkaMgHXrwMsL5sz5m2bNanHo0CEyZsyIn58fJUqUcHWJIiIiCZLCSDT88AOMHGm+njgxkP79a3H48GEyZszI9u3bKV68uGsLFBERScA0gPU5Tp2Cdu3M1++/D5cujeXw4cNkypSJHTt2KIiIiIjEknpGnuHuXWjcGO7dg9degy++AMMYxc2bN+nbty8vv/yyq0sUERFJ8BRGnsJqNXtETp+G7NmDWL48FR4eFsCTuebiIiIiIuIAekzzFKNGwdq14Ol5kxQpqjB69IcYhuHqskRERBIdhZEn+PFHGD4c4AaZMtXg7NnjrFixgoCAABdXJiIikvgojPzHqVPmeiJwg/Tpa3Dlyu9ky5aNnTt3ki1bNleXJyIikugojPxLUBA0aQL37l0nRQofAgNPkD17dnbu3EmRIkVcXZ6IiEiipDDyD6vVXOr91KkAkiXz4f79k+TIkYOdO3dSuHBhV5cnIiKSaGk2zT9GjzYXN0uW7ABW62lbEClYsKCrSxMREUnUFEYwl3kfNsx8PWtWQ1KnXk7JkiUVREREROJAkg8jZ85Aq1bXMAwrPXrkoFMngGauLktERCTJSNJjRoKCoH79qwQHV8fbuzr9+l1xdUkiIiJJTpINI1YrvPXWFc6erQ6cIUOGMCDMxVWJiIgkPfEijEyfPp28efPi7e1NhQoVOHDgwDPPX7FiBUWLFsXb25vixYuzYcMGu7/n0KFX2Ly5OvAnWbPmYc+eXeTLly9mP4CIiIjEmMvDyLJly+jTpw/Dhg3j8OHDlCxZkjp16nDjxo0nnv/zzz/TqlUrunTpwpEjR2jcuDGNGzfm999/t+v7Tp1aHzhLxox5+eWXXeTNmzf2P4yIiIjYzWK4eMOVChUqUK5cOaZNmwaA1WolV65cvP/++3z00UePnd+iRQtCQkJYt26d7dirr75KqVKlmDlz5nO/X1BQEGnSpAEgVap8HD++gzx58jjopxEREUm8Hv0OvXv3LqlTp3ZYuy6dTRMWFsahQ4cYNGiQ7Zibmxu+vr7s27fvidfs27ePPn36RDlWp04d1qxZ88TzQ0NDCQ0Ntb2/e/cuAB4eudm1ay3p0qUjKCgolj+JiIhI4vfo96Wj+zFcGkZu3bpFZGQkWbJkiXI8S5YsnDp16onXBAQEPPH8p21iN2bMGEaMGPHY8fBwf0qXLhbDykVERJKu27dv254yOEKiX2dk0KBBUXpS7ty5Q548efD393fojZSnCwoKIleuXFy+fNmh3XrydLrncU/3PO7pnse9u3fvkjt3btKnT+/Qdl0aRjJmzIi7uzvXr1+Pcvz69etkzZr1iddkzZrVrvO9vLzw8vJ67HiaNGn0hzeOpU6dWvc8jumexz3d87inex733NwcO//FpbNpPD09KVOmDH5+frZjVqsVPz8/Klas+MRrKlasGOV8gK1btz71fBEREYnfXP6Ypk+fPnTo0IGyZctSvnx5Jk+eTEhICJ3Mddlp3749OXLkYMyYMQB8+OGHVKtWjS+++IL69euzdOlSDh48yKxZs1z5Y4iIiEgMuTyMtGjRgps3bzJ06FACAgIoVaoUmzZtsg1S9ff3j9IdVKlSJZYsWcKQIUP4+OOPKVSoEGvWrKFYsegNRvXy8mLYsGFPfHQjzqF7Hvd0z+Oe7nnc0z2Pe8665y5fZ0RERESSNpevwCoiIiJJm8KIiIiIuJTCiIiIiLiUwoiIiIi4VKIMI9OnTydv3rx4e3tToUIFDhw48MzzV6xYQdGiRfH29qZ48eJs2LAhjipNPOy557Nnz6Zq1aqkS5eOdOnS4evr+9z/jeRx9v45f2Tp0qVYLBYaN27s3AITIXvv+Z07d+jZsyfZsmXDy8uLwoUL6+8XO9l7zydPnkyRIkVInjw5uXLlonfv3jx8+DCOqk34fvrpJxo0aED27NmxWCxP3fft33bu3Enp0qXx8vKiYMGCzJs3z/5vbCQyS5cuNTw9PY25c+caJ06cMLp162akTZvWuH79+hPP37t3r+Hu7m6MGzfOOHnypDFkyBDDw8PDOH78eBxXnnDZe89bt25tTJ8+3Thy5Ijxxx9/GB07djTSpElj/PXXX3FcecJl7z1/5MKFC0aOHDmMqlWrGo0aNYqbYhMJe+95aGioUbZsWaNevXrGnj17jAsXLhg7d+40jh49GseVJ1z23vPFixcbXl5exuLFi40LFy4YmzdvNrJly2b07t07jitPuDZs2GAMHjzYWLVqlQEYq1evfub558+fN1KkSGH06dPHOHnypDF16lTD3d3d2LRpk13fN9GFkfLlyxs9e/a0vY+MjDSyZ89ujBkz5onnv/XWW0b9+vWjHKtQoYLx9ttvO7XOxMTee/5fERERRqpUqYz58+c7q8REJyb3PCIiw
qhUqZLxzTffGB06dFAYsZO993zGjBlG/vz5jbCwsLgqMdGx95737NnTqFGjRpRjffr0MSpXruzUOhOr6ISRAQMGGC+//HKUYy1atDDq1Klj1/dKVI9pwsLCOHToEL6+vrZjbm5u+Pr6sm/fvides2/fvijnA9SpU+ep50tUMbnn/3X//n3Cw8MdvvFSYhXTez5y5EgyZ85Mly5d4qLMRCUm9/zHH3+kYsWK9OzZkyxZslCsWDE+++wzIiMj46rsBC0m97xSpUocOnTI9ijn/PnzbNiwgXr16sVJzUmRo36HunwFVke6desWkZGRttVbH8mSJQunTp164jUBAQFPPD8gIMBpdSYmMbnn/zVw4ECyZ8/+2B9oebKY3PM9e/YwZ84cjh49GgcVJj4xuefnz59n+/bttGnThg0bNnD27Fl69OhBeHg4w4YNi4uyE7SY3PPWrVtz69YtqlSpgmEYRERE8M477/Dxxx/HRclJ0tN+hwYFBfHgwQOSJ08erXYSVc+IJDyff/45S5cuZfXq1Xh7e7u6nETp3r17tGvXjtmzZ5MxY0ZXl5NkWK1WMmfOzKxZsyhTpgwtWrRg8ODBzJw509WlJVo7d+7ks88+46uvvuLw4cOsWrWK9evXM2rUKFeXJs+RqHpGMmbMiLu7O9evX49y/Pr162TNmvWJ12TNmtWu8yWqmNzzRyZMmMDnn3/Otm3bKFGihDPLTFTsvefnzp3j4sWLNGjQwHbMarUCkCxZMk6fPk2BAgWcW3QCF5M/59myZcPDwwN3d3fbsRdffJGAgADCwsLw9PR0as0JXUzu+SeffEK7du3o2rUrAMWLFyckJITu3bszePBgh297L0//HZo6depo94pAIusZ8fT0pEyZMvj5+dmOWa1W/Pz8qFix4hOvqVixYpTzAbZu3frU8yWqmNxzgHHjxjFq1Cg2bdpE2bJl46LURMPee160aFGOHz/O0aNHbV8NGzbEx8eHo0ePkitXrrgsP0GKyZ/zypUrc/bsWVvwAzhz5gzZsmVTEImGmNzz+/fvPxY4HoVBQ9uwOYXDfofaN7Y2/lu6dKnh5eVlzJs3zzh58qTRvXt3I23atEZAQIBhGIbRrl0746OPPrKdv3fvXiNZsmTGhAkTjD/++MMYNmyYpvbayd57/vnnnxuenp7GypUrjWvXrtm+7t2756ofIcGx957/l2bT2M/ee+7v72+kSpXKeO+994zTp08b69atMzJnzmx8+umnrvoREhx77/mwYcOMVKlSGd99951x/vx5Y8uWLUaBAgWMt956y1U/QoJz794948iRI8aRI0cMwJg4caJx5MgR49KlS4ZhGMZHH31ktGvXznb+o6m9/fv3N/744w9j+vTpmtr7yNSpU43cuXMbnp6eRvny5Y1ffvnF9lm1atWMDh06RDl/+fLlRuHChQ1PT0/j5ZdfNtavXx/HFSd89tzzPHnyGMBjX8OGDYv7whMwe/+c/5vCSMzYe89//vlno0KFCoaXl5eRP39+Y/To0UZEREQcV52w2XPPw8PDjeHDhxsFChQwvL29jVy5chk9evQw/v7777gvPIHasWPHE/9+fnSfO3ToYFSrVu2xa0qVKmV4enoa+fPnN7799lu7v6/FMNR3JSIiIq6TqMaMiIiISMKjMCIiIiIupTAiIiIiLqUwIiIiIi6lMCIiIiIupTAiIiIiLqUwIiIiIi6lMCIiIiIupTAiIiIiLqUwIiIiIi6lMCIiNtWrV6dXr17Rfu+sNhKqxPyziThTMlcXICLPFhAQwOjRo1m/fj1Xrlwhc+bMlCpVil69elGzZk2nfu9Vq1bh4eHh0Gv+/Xn16tUpVaoUkydPjk2ZAHTs2JE7d+6wZs2aWLclInFLYUQkHrt48SKVK1cmbdq0jB8/nuLFixMeHs7mzZvp2bMnp06deuyasLAwPD09HfL906dP7/BrYtKmiCRuekwjEo/16NEDi8XCgQMHaNasGYULF+bll1+mT58+/PLLL4DZu/Dee+/Rq1cvMmbMSJ06dQCwWq2MGTOGfPnykTx5ckqWLMnKlSttbYeEhNC+fXtSpkxJtmzZ+OKLLx77/k967BAREcF7771HmjRpyJgxI5988gn/3vz7eY8qHn3esWNHdu3axZQpU7BYLFgsFi5evMiCBQvIkCEDoaGhUa5r3Lgx7dq1s/cWRutezJo1i+zZs2O1WqNc16hRIzp37hytNqLrwIEDVK9eneTJk1O0aFEOHjzIrFmzaNiwYYx+NpHEQGFEJJ4KDAxk06ZN9OzZkxdeeOGxz9OmTWt7PX/+fDw9Pdm7dy8zZ84EYMyYMSxYsICZM2dy4sQJevfuTdu2bdm1axcA/fv3Z9euXfzwww9s2bKFnTt3cvjw4efWNX/+fJIlS8aBAweYMmUKEydO5JtvvrH755syZQoVK1akW7duXLt2jWvXrpErVy7efPNNIiMj+fHHH23n3rhxg/Xr19uCgb2edy/efPNNbt++zY4dO2zXPLr/bdq0iVYb0fHLL79QrVo16tevz2+//caLL77IyJEjGTt2LCNGjIjRzyaSGOgxjUg8dfbsWQzDoGjRos89t1ChQowbN872PjQ0lM8++4xt27ZRsWJFAPLnz8+ePXv4+uuvKVOmDHPmzGHRokW2cSfz588nZ86cz/1euXLlYtKkSVgsFooUKcLx48eZNGkS3bp1s+vnS5MmDZ6enqRIkYKsWbPajidPnpzWrVvz7bff8uabbwKwaNEicufOTfXq1e36HvD8e1GtWjXSpUtH3bp1WbJkie1+rFy5kowZM+Lj4xOtNqKjT58+vPnmm/Tv3x+AVq1a0apVKxo1asQrr7xi988mklgojIjEU/9+9PE8ZcqUifL+7Nmz3L9/n1q1akU5HhYWxiuvvMK5c+cICwujQoUKts/Sp09PkSJFnvu9Xn31VSwWi+19xYoV+eKLL4iMjMTd3T3aNT9Lt27dKFeuHFeuXCFHjhzMmzePjh07Rvm+0fW8e/FImzZt6NatG1999RVeXl4sXryYli1b4ubmFu02nuWvv/5i3759TJgwwXYsWbJkGIahXhFJ8hRGROKpQoUKYbFYnjhI9b/++xgnODgYgPXr15MjR44on3l5eREYGOi4Qp3glVdeoWTJkixYsIDatWtz4sQJ1q9fH6O2nncvHmnQoAGGYbB+/XrKlSvH7t27mTRpkl1tPMsff/wBQOnSpW3HTp8+Tfny5SlevLidP5VI4qIwIhJPpU+fnjp16jB9+nQ++OCDxwLHnTt3oowb+beXXnoJLy8v/P39n/gIIV26dHh4eLB//35y584NwN9//82ZM2ee+8hh//79Ud7/8ssvFCpUKEa9Ip6enkRGRj7xs65duzJ58mSuXLmCr68vuXLlsrt9eP69eMTb25umTZuyePFizp49S5EiRWzBIbptPMvdu3dxd3e39e4EBgYyYcIESpYsGaP2RBIThRGReGz69OlUrlyZ8uXLM3LkSEqUKEFERARbt25lxowZtn9t/1eqVKno168fvXv3xmq1UqVKFe7evcvevXtJnTo1
HTp0oEuXLvTv358MGTKQOXNmBg8ejJvb88e0+/v706dPH95++20OHz7M1KlTnzgTJzry5s3L/v37uXjxIilTpiR9+vS2Glq3bk2/fv2YPXs2CxYsiFZ7d+/e5ejRo1GOZciQ4bn34pE2bdrwxhtvcOLECdq2bWs7Hp37+TylSpUiMjKScePG8eabb/Lhhx+SN29eTp48yaVLl8iTJ0+0fkaRxEhhRCQey58/P4cPH2b06NH07duXa9eukSlTJsqUKcOMGTOeee2oUaPIlCkTY8aM4fz586RNm5bSpUvz8ccfAzB+/HiCg4Np0KABqVKlom/fvty9e/e5NbVv354HDx5Qvnx53N3d+fDDD+nevXuMfr5+/frRoUMHXnrpJR48eMCFCxfImzcvYA5wbdasGevXr6dx48bRam/nzp2PjeHo0qULs2fPfua9eKRGjRqkT5+e06dP07p16yifPe9+Pk/BggUZOXIkU6ZM4bPPPqNly5YsWbKE2rVr8/rrrz81WIokBRbDnlFyIiJxqGbNmrz88st8+eWXri5FRJxIYURE4p2///6bnTt30rx5c06ePBmtWT4iknDpMY2IxDuvvPIKf//9N2PHjlUQEUkC1DMiIiIiLqXl4EVERMSlFEZERETEpRRGRERExKUURkRERMSlFEZERETEpRRGRERExKUURkRERMSlFEZERETEpRRGRERExKUURkRERMSl/gcdSfoSTJVlFwAAAABJRU5ErkJggg==\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAIWCAYAAACFnY2vAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB8SUlEQVR4nO3dd3xN9x/H8ddNJEHtvUft2rVKqYSgpUZLa29qtbW1aq+qTdWuVq1SitYeEZQqNWsUtRpFrBASZJ7fH6furykqN+7IeD8fjzx+956ce76fnPrJ2/d8h8UwDAMRERERF3FzdQEiIiKStCmMiIiIiEspjIiIiIhLKYyIiIiISymMiIiIiEspjIiIiIhLKYyIiIiISymMiIiIiEspjIiIiIhLKYyIiIiIS7k0jOzatYv69euTI0cOLBYLa9aseeZnduzYwcsvv4yXlxcFCxZkwYIFDq9TREREHMelYSQ0NJTSpUszY8aMWJ1/4cIF6tWrh4+PD0eOHKFXr1506tSJzZs3O7hSERERcRRLfNkoz2KxsHr1aho1avTUcz766CPWr1/P8ePHrceaNWvGnTt32LRpkxOqFBEREXtL5uoCbLF37158fX1jHKtTpw69evV66mfCwsIICwuzvo+OjiYoKIiMGTNisVgcVaqIiEiiYxgG9+7dI0eOHLi52e/hSoIKI4GBgWTNmjXGsaxZs3L37l0ePHhAihQpHvvM2LFjGTFihLNKFBERSfQuXbpErly57Ha9BBVG4mLgwIH06dPH+j44OJg8efJw6dIl0qRJ48LKRERE4r9r167h4+PL5csBQF7gT1KnTm3XNhJUGMmWLRvXrl2LcezatWukSZPmib0iAF5eXnh5eT12PE2aNAojIiIiz/DCCy/g4VEN2MdLL63l5Mlidh/mkKDWGalcuTJ+fn4xjm3dupXKlSu7qCIREZHEbdcudy5e/Ab4mRkzcjikDZeGkZCQEI4cOcKRI0cAc+rukSNHCAgIAMxHLG3atLGe37VrV86fP8+AAQM4deoUM2fO5LvvvqN3796uKF9ERCRROn/+PP379+fBgyi6dwdwp3v3zLz8smPac+ljmgMHDuDj42N9/2hsR9u2bVmwYAFXr161BhOA/Pnzs379enr37s20adPIlSsXX375JXXq1HF67SIiIonRuXPn8Pb25q+//uLgweScOjWKLFlg9GjHtRlv1hlxlrt375I2bVqCg4OfOmbEMAwiIyOJiopycnWJi7u7O8mSJdMUahGRBOLs2bP4+Pjw119/UaBAUS5f9ufhw2wsXAitW8fud2hcJKgBrM4QHh7O1atXuX//vqtLSRRSpkxJ9uzZ8fT0dHUpIiLyH/744w98fHy4fPkyxYoVI3duf86dy8prr0GrVo5tW2HkH6Kjo7lw4QLu7u7kyJEDT09P/as+jgzDIDw8nBs3bnDhwgUKFSpk1wVyRETEfv744w+8vb25cuUKL730Eh99tJ22bbOSLBnMnAmO/lWoMPIP4eHhREdHkzt3blKmTOnqchK8FClS4OHhwZ9//kl4eDjJkyd3dUkiIvIv4eHh1KlThytXrlC8eHHWrduOt3cWAPr2heLFHV+D/qn6BPoXvP3oXoqIxG+enp5Mnz6dcuXK4e/vz7x5WfjzT8idG4YMcU4N+k0hIiKSBP1z/kq9evXYv38/t25lZsIE89jnn8MLLzinFoURERGRJOb333+nYsWKnDt3znrMYnGjRw+IiIB69aBhQ+fVozAiIiKShJw8eRJvb28OHDgQY9f7Zctg+3ZIntzsFXHm/A2FkUTAYrH859fw4cOt5xYtWhQvLy8CAwMfu463t7f1M8mTJ6dw4cKMHTs2RlfexYsXY1w7Y8aM1K5dm8OHDzvjRxURkedw4sQJvL29uX79OmXKlGHBggUABAfDoz1lBw2CF190bl0KI4nA1atXrV9Tp04lTZo0MY7169cPgN27d/PgwQOaNGnCN99888Rrde7cmatXr3L69GkGDhzI0KFDmT179mPnbdu2jatXr7J582ZCQkJ44403uHPnjiN/TBEReQ7Hjx/Hx8eHGzduULZsWfz8/MiYMSNgDlQNDITChaF/f+fXpjDyDIYBoaGu+Yrt2rjZsmWzfqVNmxaLxRLjWKpUqQCYP38+LVq0oHXr1nz11VdPvFbKlCnJli0befPmpX379pQqVYqtW7c+dl7GjBnJli0b5cuXZ+LEiVy7do19+/bF+T6LiIjjHDt2zBpEXn75ZbZt20aGDBkAOHQIZswwz5sxA56w0b3DaZ2RZ7h/H/7+Xe50ISH2G8l87949VqxYwb59+yhatCjBwcH89NNPVKtW7YnnG4bB7t27OXXqFIUKFfrPa6dIkQIw56qLiEj807dvX27evEm5cuXYunUr6dOnByA6Grp1M/+3WTPw9XVNfeoZSSKWLVtGoUKFKF68OO7u7jRr1oz58+c/dt7MmTNJlSoVXl5evPbaa0RHR/Phhx8+9bp37txh1KhRpEqViooVKzryRxARkTj69ttvad++Pdu2bbMGEYAvv4T9+yF1apg0yXX1qWfkGVKmNHsoXNW2vXz11Ve0+sfmAq1ataJ69epMnz6d1KlTW4+3bNmSQYMGcfv2bYYNG0aVKlWoUqXKY9erUqUKbm5uhIaG8uKLL7J8+XKyZs1qv4JFROS5BAUFWR/FZMyY8bHH8zduwMcfm69HjYIcOZxd4f8pjDyDxeK8RV8c5eTJk/zyyy/s37+fjz76yHo8KiqKZcuW0blzZ+uxtGnTUrBgQQ
C+++47ChYsyCuvvILvv/ruli9fzksvvUTGjBlJly6dU34OERGJnUOHDlGrVi1GjRpF9+7dn3jORx/B7dtQujT06OHkAv9Fj2mSgPnz5/Paa69x9OhRjhw5Yv3q06fPEx/VPJIqVSp69uxJv379YkzvBcidOzcFChRQEBERiWcOHTqEr68vQUFBLFq0iMjIyMfO2b0bvv7afD1rFiRzcdeEwkgiFxERwaJFi2jevDklSpSI8dWpUyf27dvHiRMnnvr5Ll26cObMGb7//nsnVi0iInFx8OBBatasye3bt3nllVfYvHkzyf6VNCIizEGrAJ07Q+XKLij0XxRGErkff/yRW7du8dZbbz32vWLFilGsWLH/7B3JkCEDbdq0Yfjw4URHRzuyVBEReQ6//vorvr6+3Llzh8qVK7N582bSpEnz2Hmffw7Hj0PGjDB2rAsKfQKL8e/+90Tu7t27pE2bluDg4Mf+Iz18+JALFy6QP39+bXdvJ7qnIiKOt3//fmrXrk1wcDCvvvoqGzdujDE54ZGLF6FECXMtq/nzoUMH29r5r9+hz0M9IyIiIgncjh07CA4OpmrVqk8NImFh8M47ZhB59VVo1875dT6NZtOIiIgkcAMGDCBz5sw0adLkiUEEoG9fOHAA0qeHJUvALR51R8SjUkRERCS2jhw5Qsg/FsJq3779U4PI8uX/X/J98WLIm9cZFcaewoiIiEgCs3fvXl577TXq1atHaGjof557+jR06mS+/uQTqFvXCQXaSGFEREQkAfn555+pXbs29+7dw+0Zz1pCQ6FxY3MlcW9vGDHCOTXaSmFEREQkgdi9ezd16tQhJCSEGjVqsH79el54yjLhhgHdu8OJE5AtG3z7resXN3sahREREZEE4KeffuL1118nJCSEmjVrsnbtWlL+xyZm8+fDwoXmQNVly8xAEl8pjIiIiMRzP/30E2+88QahoaH4+vo+M4gcOQLvv2++HjMGqld3Tp1xFU87bEREROSR1KlT4+XlRZUqVfjhhx9IkSLFU88NDoYmTcx1Rd58EwYMcGKhcaSekUTC29ubXr16PfX7+fLlY+rUqXZt0xHXFBGRx5UpU4Y9e/Y8M4gYhrmq6rlz5vTdb76JX+uJPI16RhKJVatW4eHh4eoyRETETvz9/fHw8KBq1aoAFC1a9JmfmToVVq0CDw/47jvIkMHBRdqJwkgikSGh/IkTEZFn8vPzo379+ri7u7Nnzx5KlSr1zM/8/PP/H8lMmQIVKzq4SDtKAJ038UNoaOhTvx4+fBjrcx88eBCrc231z8c0169fp379+qRIkYL8+fOzZMmSx86/c+cOnTp1InPmzKRJk4YaNWpw9OhR6/fPnTtHw4YNyZo1K6lSpaJChQps27bN5rpERMQ227Zt48033+TBgwdUr16dIkWKPPMzN29C06YQGQnNmplTehMS9YzEUqpUqZ76vbp167J+/Xrr+yxZsnD//v0nnlu9enV27NhhfZ8vXz5u3rz52HnPs5lyu3btuHLlirWL78MPP+T69esxznnnnXdIkSIFGzduJG3atMyZM4eaNWty5swZMmTIQEhICHXr1mXMmDF4eXmxcOFC6tevz+nTp8mTJ0+caxMRkafbunUrDRo04OHDh7z55pusXLkSLy+v//xMdDS0agV//QVFisDcuWCxOKlgO1EYSWTOnDnDxo0b2b9/PxUqVABg/vz5FCtWzHrO7t272b9/P9evX7f+IZ84cSJr1qxh5cqVvPfee5QuXZrSpUtbPzNq1ChWr17Njz/+yPuP5ouJiIjdbNmyhQYNGhAWFkb9+vVZsWLFM4MImFN3N2+GFClg5Up4yvY08ZrCSCz9czOif3N3d4/x/t+9EP/076V7L168+Fx1/dvvv/9OsmTJKFeunPVY0aJFSZcunfX90aNHCQkJIWPGjDE+++DBA86dOweYP+/w4cNZv349V69eJTIykgcPHhAQEGDXekVEBPbv328NIg0bNuS7777D09PzmZ/btg2GDTNfz54NJUo4uFAHURiJpactt+vMc+0lJCSE7Nmzx3hc9Mij0NKvXz+2bt3KxIkTKViwIClSpKBJkyaEh4c7t1gRkSSgdOnS+Pr64uHhwfLly2MVRC5fhhYtzOm8nTpBmzZOKNRBFEYSmaJFixIZGcnBgwetj2lOnz7NnTt3rOe8/PLLBAYGkixZMvLly/fE6+zZs4d27drx1ltvAWaAsXcvjoiImLy8vPj++++xWCyxCiIREeZA1Rs3oEwZ+Pxzx9foSJpNk8gUKVKE119/nS5durBv3z4OHjxIp06dYiyS4+vrS+XKlWnUqBFbtmzh4sWL/PzzzwwaNIgDBw4AUKhQIVatWsWRI0c4evQoLVq0IDo62lU/lohIorNu3Tr69+9vnbDg5eUVqyACMGgQ7N4NadLAihXmeJGETGEkEfr666/JkSMH1atX5+233+a9994jS5Ys1u9bLBY2bNjAa6+9Rvv27SlcuDDNmjXjzz//JGvWrABMnjyZ9OnTU6VKFerXr0+dOnV4+eWXXfUjiYgkKmvXruXtt99m4sSJLFq0yKbP/vADTJhgvv76ayhY0AEFOpnFeJ45pAnQ3bt3SZs2LcHBwaRJkybG9x4+fMiFCxfInz8/yZMnd1GFiYvuqYhITD/++CNNmjQhIiKCd955hyVLlsR6Be0jR6BaNQgJgd69YfJkx9b6b//1O/R5qGdERETESdasWWMNIu+++y5Lly6NdRC5fNnc+C4kBGrUgHHjHFysEymMiIiIOMHq1at55513iIiIoFmzZixZsoRkyWI3jyQkxAwily9DsWLw/ffm/jOJhcKIiIiIg125coXmzZsTGRlJixYtWLRoUayDSFSUOXPmyBHIkgXWr4d/LB2VKGhqr4iIiIPlyJGD+fPns3XrVubPn//YYplPYxjQq5cZQJInhx9/hPz5HVurKyiMPEESG9PrULqXIpKURUREWMeEtGzZkpYtW9r0+c8/hy++MPeaWbwYKlVyRJWup8c0//DoD8zTNrkT2z26l7EdoCUiklh89913lClThitXrsTp8z/8YM6YARg/Hho3tmNx8Yx6Rv7B3d2ddOnSWfeWSZkyJZaEtvVhPGEYBvfv3+f69eukS5cu1l2SIiKJwfLly2nZsiVRUVHMnDmT0aNH2/T5gwf/v9R7ly7Qt6+DCo0nFEb+JVu2bMB/b3YnsZcuXTrrPRURSQq+/fZbWrVqRXR0NO3bt2fEiBE2fT4gwJw5c/8+1Knz/8c0iZnCyL9YLBayZ89OlixZiIiIcHU5CZqHh4d6REQkSVm6dCmtW7cmOjqaDh06MG/evMd2a/8vd+9CvXoQGAglS8J330EsJ90kaEngR4wbd3d3/SIVEZFYW7x4MW3btiU6OppOnToxZ84cm4JIRAS88w4cPw7Zs5szaOy4yGm8pgGsIiIizyk8PJwxY8YQHR1N586dbQ4ihgEffABbtkDKlLB2LeTO7cCC4xn1jIiIiDwnT09Ptm3bxrx58xg6dKhNQQRg4kSYM8ccG/Ltt1CunIMKjae0UZ6Ii
Egc/fnnn+TNm/e5rrFypfl4BmDqVOjZ8/nrchRtlCciIhKPfP311xQsWJBvv/02ztfYtw9atzZff/BB/A4ijqQwIiIiYqP58+fTsWNHIiMj+eWXX+J0jQsXoEEDePjQnMo7ZYqdi0xAFEZERERs8OWXX9KpUycMw+D9999n6tSpNl/jzh1zCu/161C2rDlOJClP4FQYERERiaW5c+fSuXNnAD744AM+//xzm1fqjogwl3b//XfImdOcOZMqlSOqTTgURkRERGJhzpw5dOnSBYCePXsybdq0OG0Z8uGHsH27GUDWrzcDSVKnMCIiIhILJ0+eBKB3795MmTIlTkFk1iyYPfv/U3hLl7Z3lQmT1hkRERGJhalTp+Lj40PDhg3jFER27jR7RQA+/dQctCom9YyIiIg8xdq1awkLCwPMvcsaNWoUpyBy4YI5TiQyEpo3h48+snelCZvCiIiIyBNMnz6dBg0a8M477xAZGRnn64SEQMOGcOuWubLq/PmJfxdeWymMiIiI/Mu0adP48O9nKsWLF4/zxqnR0dCmDRw7Blmzwpo1kCKFHQtNJBRGRERE/mHq1Kn06tULgIEDB/Lpp5/G6dEMwMiRsHo1eHqa/5srlx0LTUQURkRERP42ZcoUevfuDcCgQYMYM2ZMnIPI99/DiBHm69mzoXJle1WZ+CiMiIiIAJ9//jl9+vQBYMiQIYwaNSrOQeToUfPxDECvXtC+vZ2KTKQ0tVdERAR4+eWXeeGFF+jbty/Dhw+PcxC5ccMcsHr/PtSqBRMm2LnQREhhREREBKhatSonTpwgb968cb5GeDg0aQJ//gkFC8KyZZBMv2mfSY9pREQkyZo6dSpHjx61vn+eIALQsyfs2gWpU8OPP0KGDM9bYdKgMCIiIknSp59+Su/evalZsybXr19/7uv9c6n3pUuhWDE7FJlEKIyIiEiSM3r0aAYNGgSYe81kyZLlua6npd6fj55kiYhIkjJq1CiGDh0KmL0jAwcOfK7raan356eeERERSTJGjBhhDSJjx4597iCipd7tQz0jIiKSJCxYsIDhw4cDMG7cOAYMGPBc19NS7/ajMCIiIklCkyZN+Oqrr6hfvz79+/d/7utpqXf7URgREZEkIVWqVPj5+eHh4fHc11q1Sku925PGjIiISKJkGAaDBw9m1KhR1mP2CCIBAdChg/laS73bh3pGREQk0TEMg0GDBjF27FgA6tSpQ8WKFZ/7utHR0K4dBAdDpUpa6t1eFEZERCRRMQyDgQMHMm7cOACmTZtmlyACMGUK+PtDypSwaJGWercX3UYREUk0DMPgo48+YsLfXRbTp0/n/ffft8u1f/sNPvnEfD1lChQqZJfLCgojIiKSSBiGQf/+/Zk0aRIAX3zxBT169LDLtR8+hFatzI3w3nwTOne2y2XlbwojIiKSKOzZs8caRGbMmEH37t3tdu3Bg831RDJnhi+/1MJm9uby2TQzZswgX758JE+enEqVKrF///7/PH/q1KkUKVKEFClSkDt3bnr37s3Dhw+dVK2IiMRXVatW5fPPP2fWrFl2DSL+/jB5svl6/nxzgTOxL5f2jCxfvpw+ffowe/ZsKlWqxNSpU6lTpw6nT59+4qZFS5cu5eOPP+arr76iSpUqnDlzhnbt2mGxWJj86E+KiIgkGYZhEBoaSqpUqQD44IMP7Hr9O3egbVswDPPRTP36dr28/M2lPSOTJ0+mc+fOtG/fnpdeeonZs2eTMmVKvvrqqyee//PPP/Pqq6/SokUL8uXLR+3atWnevPkze1NERCTxMQyDnj178tprrxEUFOSQNnr0gEuXoGDB//eOiP25LIyEh4dz8OBBfH19/1+Mmxu+vr7s3bv3iZ+pUqUKBw8etIaP8+fPs2HDBurWrfvUdsLCwrh7926MLxERSdgMw+DDDz9k+vTpHD58GH9/f7u3sWwZLF0K7u7mNN6/O1/EAVz2mObmzZtERUWR9V8P37JmzcqpU6ee+JkWLVpw8+ZNqlatimEYREZG0rVrVz55NNfqCcaOHcuIR2v2iohIgmcYBu+//z4zZ87EYrEwb948GjdubNc2Ll2Cbt3M14MGwSuv2PXy8i8uH8Bqix07dvDpp58yc+ZMDh06xKpVq1i/fn2MpX7/beDAgQQHB1u/Ll265MSKRUTEnqKjo+nRo4c1iMyfP5+OHTvauQ1zldU7d6BCBXMmjTiWy3pGMmXKhLu7O9euXYtx/Nq1a2TLlu2JnxkyZAitW7emU6dOAJQsWZLQ0FDee+89Bg0ahJvb49nKy8sLLy8v+/8AIiLiVNHR0XTv3p05c+ZgsVj4+uuvadu2rd3bmTYNtm83V1ldvBjssJ2NPIPLekY8PT0pV64cfn5+1mPR0dH4+flR+SnbH96/f/+xwOHu7g6Y3XYiIpJ4Xb9+nXXr1mGxWFiwYIFDgsjx4zBwoPl60iQoXNjuTcgTuHRqb58+fWjbti3ly5enYsWKTJ06ldDQUNr/vQVimzZtyJkzp3Wjo/r16zN58mTKli1LpUqVOHv2LEOGDKF+/frWUCIiIolTtmzZ8Pf359ChQzRt2tTu1w8LM1dZDQuDevWgSxe7NyFP4dIw0rRpU27cuMHQoUMJDAykTJkybNq0yTqoNSAgIEZPyODBg7FYLAwePJjLly+TOXNm6tevz5gxY1z1I4iIiANFR0fz22+/UaZMGQAKFSpEIQdtCjN0KBw9CpkyaZVVZ7MYSez5xt27d0mbNi3BwcGkSZPG1eWIiMhTREdH06lTJ5YsWcKaNWt44403HNbWzp3g42MubrZmDTRs6LCmEjRH/Q7V3jQiIhLvREVF0alTJxYsWICbmxt37txxWFvBwdCmjRlEOnZUEHEFhREREYlXoqKi6NChAwsXLsTd3Z0lS5Y4ZIzIIx98AAEB8OKLMGWKw5qR/6AwIiIi8UZUVBTt27dn0aJFuLu7s3TpUt59912Htbdihbm6qpub+b+pUzusKfkPCiMiIhIvREVF0a5dOxYvXoy7uzvLli2jSZMmDmvv8uX/z5gZOBCqVHFYU/IMCWoFVhERSfySJUvG8uXLHRpEoqOhfXu4fRvKlYNhwxzWlMSCwoiIiMQL7u7uLFiwgN27d9t9r5l/Mgzo2xe2boUUKbTKanygMCIiIi4TGRnJrFmziIqKAsxAUqlSJYe2OWoUTJ1qvp43D4oWdWhzEgsKIyIi4hIRERG0bNmS7t2708VJy51On/7/RzKffw4tWzqlWXkGDWAVERGni4iIoEWLFqxcuRIPDw8aNGjg8DYXLYIPPzRfjxhhTumV+EFhREREnCoiIoLmzZvz/fff4+npycqVK6lfv75D2/zhB3PAKkDPnjBkiEObExspjIiIiNNERETQrFkzVq1ahaenJ6tWraJevXoObdPfH5o2hagoaNsWJk/WvjPxjcKIiIg4TZs2baxBZPXq1dStW9eh7f36KzRoYO7E26iRuQGem0ZLxjv6TyIiIk7TunVr0qRJw5o1axweRE6ehDfegJAQqFEDvv0Wkumf4PGS/rOIiIjT1K1blwsXLpAhQwaH
tnPxItSuDbduQcWK5k68yZM7tEl5DuoZERERhwkLC+O9997j7Nmz1mOODiKBgeDray73/tJLsGGD9pyJ7xRGRETEIcLCwmjcuDHz5s2jXr16REZGOrzN27ehTh04dw7y5YMtWyBjRoc3K89JYUREROzu4cOHvP3226xfv54UKVIwc+ZMkjl4wEZoKNSrB7/9BtmywbZtkDOnQ5sUO9GYERERsatHQWTjxo2kSJGCdevWUaNGDYe2GRYGb78Ne/dCunSweTMUKODQJsWOFEZERMRuHj58yFtvvcWmTZtIkSIF69evx8fHx6FtRkVBq1bmI5mUKc0xIqVKObRJsTM9phEREbsZOHAgmzZtImXKlGzYsMHhQcQwoEsXWLnS3Hl3zRqoXNmhTYoDKIyIiIjdDBkyhKpVq7Jhwwa8vb0d2pZhwIABMH++uZDZt99CrVoObVIcRI9pRETkuURFReHu7g6Y03Z37dqFxQnrrY8dCxMnmq/nzYPGjR3epDiIekZERCTO7t+/T506dZg5c6b1mDOCyLRpMGiQ+XrCBOjQweFNigMpjIiISJyEhoZSv359/Pz8+Pjjj7l+/bpT2p03D3r1Ml8PHQr9+jmlWXEgPaYRERGbhYaG8uabb7Jjxw5SpUrFxo0byZIli8PbXbzYHLAKZggZPtzhTYoTKIyIiIhNQkNDqVevHjt37iR16tRs2rSJKlWqOLzd77+Hdu3Mgavdu8P48eCEJ0LiBAojIiISayEhIdSrV49du3aRJk0aNm/ezCuvvOLwdjdsgObNzTVF2rWD6dMVRBIThREREYm177//3hpEtmzZQqVKlRze5vbt5uqqERHQtCl8+aU5lVcSD4URERGJtbZt2xIYGIiPjw8VK1Z0eHt79kCDBuZy7w0bwqJF8PcsYklELIZhGK4uwpnu3r1L2rRpCQ4OJk2aNK4uR0Qk3rt37x4Wi4VUqVI5td0DB6BmTbh7F2rXhh9/BC8vp5Yg/+Ko36Hq6BIRkae6e/cuderUoV69eoSGhjqt3WPHoE4dM4i89hqsXq0gkpgpjIiIyBMFBwdTp04d9u7dy7Fjx7hw4YJT2j192lzWPSgIKlWCdevMDfAk8dKYERERecyjILJv3z7Sp0/Ptm3bKFGihMPbvXDBfDRz7RqUKQMbN0Lq1A5vVlxMPSMiIhLDnTt3qF27Nvv27SNDhgz4+fnx8ssvO7zdv/4yg8jly1CsGGzZAunTO7xZiQfUMyIiIlaPgsivv/5KxowZ8fPzo3Tp0g5v99o1M4hcuAAFCoCfH2TO7PBmJZ6wuWfk/PnzjqhDRETigcuXL3Pu3DmnBpFbt8wxImfOQJ48ZhDJnt3hzUo8YnMYKViwID4+PixevJiHDx86oiYREXGR4sWL4+fnx/bt250SRIKD4fXXzdkz2bObQSRvXoc3K/GMzWHk0KFDlCpVij59+pAtWza6dOnC/v37HVGbiIg4QVBQUIy/x8uUKUOpUqUc3m5ICNSrZ64nkikTbNsGBQs6vFmJh2wOI2XKlGHatGlcuXKFr776iqtXr1K1alVKlCjB5MmTuXHjhiPqFBERBwgKCsLX15eaNWuyZ88ep7V7757ZI7JnD6RLZw5WfeklpzUv8UycZ9MkS5aMt99+mxUrVjBu3DjOnj1Lv379yJ07N23atOHq1av2rFNEROzs1q1b1KxZk8OHD5MyZUrSpUvnlHaDg80Fzf4ZRMqWdUrTEk/FOYwcOHCA7t27kz17diZPnky/fv04d+4cW7du5cqVKzRs2NCedYqIiB3dvHmTmjVrcuTIEbJmzYq/vz/Fixd3eLt37phLu+/da07b3bYNKlRweLMSz9k8tXfy5Ml8/fXXnD59mrp167Jw4ULq1q2L299bKObPn58FCxaQL18+e9cqIiJ2cOPGDWrWrMmxY8esQaRYsWIObzcoyAwiBw9CxoxmEClTxuHNSgJgcxiZNWsWHTp0oF27dmR/ytyrLFmyMH/+/OcuTkRE7OvRo5ljx46RLVs2/P39KVq0qBPaBV9fOHLEXD/Ezw9KlnR4s5JA2BxGtm7dSp48eaw9IY8YhsGlS5fIkycPnp6etG3b1m5FioiIfaRKlYq8efNy8+ZN/P39KVKkiMPbvHHDXNDs2DHIkgW2bwcnPBGSBMRiGIZhywfc3d25evUqWbJkiXH81q1bZMmShaioKLsWaG+O2v5YRCShCAsL4+rVq055nP5oZdUTJyBbNjOIOOGJkDiIo36H2jyA9WnZJSQkhOTJkz93QSIiYl/Xrl1j3Lhx1r+/vby8nBJErl4Fb28ziOTIATt3KojIk8X6MU2fPn0AsFgsDB06lJT/2M85KiqKffv2UUYjkURE4pXAwEBq1KjB77//TlhYGEOHDnVKu5cvQ40a5hLvuXKBv78WNJOni3UYOXz4MGD2jBw7dgxPT0/r9zw9PSldujT9+vWzf4UiIhInV69epUaNGpw6dYpcuXLRsmVLp7R76ZIZRM6eNfea8feHF190StOSQMU6jPj7+wPQvn17pk2bpvEWIiLx2NWrV/Hx8eH06dPkzp0bf39/ChQo4PB2//wTfHzM3Xfz5zfHiGilB3kWm2fTfP31146oQ0RE7OTKlSv4+Phw5swZ8uTJg7+/Py86oWviwgUziPz5JxQoYAaRPHkc3qwkArEKI2+//TYLFiwgTZo0vP322/957qpVq+xSmIiI2C48PBxfX1/OnDlD3rx58ff3J3/+/A5v99w589FMQAAUKmQGkVy5HN6sJBKxCiNp06bFYrFYX4uISPzk6enJ4MGDGTJkCH5+fk6ZNfPHH2aPyOXLUKSIGURy5HB4s5KI2LzOSEKndUZEJCl4+PChU5ZbOH3aDCJXr5q77vr5meuJSOIUb9YZERGR+CUgIIA33niDK1euWI85I4j89hu89poZREqUMGfNKIhIXMTqMU3ZsmWtj2me5dChQ89VkIiIxF5AQADe3t5cuHCBTp06sWHDBqe0++uvUKcO3L5tbna3ZYu554xIXMQqjDRq1MjBZYiIiK3+/PNPfHx8uHDhAgUKFGDOnDlOafenn6BePbh3D155BTZuhHTpnNK0JFIaMyIikgBdvHgRHx8fLl68SIECBdixYwe5nDB9ZcsWaNQIHjwwl3r/8UdIndrhzUo8oTEjIiICwIULF/D29ubixYsUKlSInTt3OiWI/PAD1K9vBpG6dWHDBgURsY9YPabJkCEDZ86cIVOmTKRPn/4/x48EBQXZrTgREXnce++9x59//knhwoXZvn07OXPmdHib334LrVtDVBQ0bgxLl8I/dgUReS6xCiNTpkwh9d/xd+rUqY6sR0REnmHBggV07dqVOXPmkMMJC3rMnw+dO4NhmIHkq68gmc3rd4s8ncaMiIgkAA8ePCBFihROb/fzz6FnT/N1164wYwa46QF/kuWo36FxyrZRUVGsXr2a33//HYCXXnqJhg0bkkxRWUTE7s6ePYuvry9jx46lefPmTmv3009h0CDzdd++MGECxHKVBxGb2NwzcuLECRo0aEBgYCBFihQB4MyZM2TOnJm
1a9dSokQJhxRqL+oZEZGE5I8//sDHx4fLly9TsmRJDh48iIeHh0PbNAwzhIwda74fNsz8UhCReDObplOnThQvXpy//vqLQ4cOcejQIS5dukSpUqV477337FaYiEhSd+bMGby9vbl8+TIvvfQSW7dudXgQiY6GXr3+H0QmTIDhwxVExLFsfq5y5MgRDhw4QPr06a3H0qdPz5gxY6hQoYJdixMRSapOnz6Nj48PV69epXjx4vj5+ZE1a1aHthkVBV26mANWAWbOhG7dHNqkCBCHnpHChQtz7dq1x45fv36dggUL2qUoEZGk7NSpU9YgUqJECbZv3+7wIBIRYc6UmT/fHKC6YIGCiDhPrHpG7t69a309duxYPvzwQ4YPH84rr7wCwC+//MLIkSMZN26cY6oUEUlCvv32W65evUrJkiXx8/Mjs4M3fXn4EJo1Mxc1S5bMXEPknXcc2qRIDLEawOrm5hZjobNHH3l07J/vo6KiHFGn3WgAq4jEd4ZhMGHCBNq3b+/wIPLggbm8+5Yt4OUF339v7jsj8iQundrr7+9vtwZFRORx58+fJ2fOnHh5eWGxWBgwYIDD24yMhBYtzCDywgvmPjM1aji8WZHHxCqMVK9e3dF1iIgkWcePH6dmzZpUqlSJFStW4OXl5fA2DcMcE7Jmjdkjsn496K96cZU4rVJ2584d5s+fb130rHjx4nTo0IG0adPatTgRkcTu+PHj1KhRgxs3bvDXX39x//59p4SRoUPhyy/NwapLlyqIiGvZPJvmwIEDFChQgClTphAUFERQUBCTJ0+mQIECHDp0yBE1iogkSseOHcPHx4cbN27w8ssvs23bthjLJjjKF1/A6NHm61mz4O23Hd6kyH+yeQXWatWqUbBgQebNm2dd/j0yMpJOnTpx/vx5du3a5ZBC7UUDWEUkPjh69Cg1a9bk1q1blCtXji1btpAhQwaHt/vdd+bMGcOAkSNhyBCHNymJiKN+h9ocRlKkSMHhw4cpWrRojOMnT56kfPny3L9/327FOYLCiIi42j+DSPny5dmyZYtTekT8/OCNN8w1Rbp3N3tItLKq2CLeLAefJk0aAgICHjt+6dIlUqdObZeiREQSs5CQEB4+fEiFChXYunWrU4LIoUPw1ltmEGnSxNyNV0FE4gubB7A2bdqUjh07MnHiRKpUqQLAnj176N+/v1N3kxQRSaheffVV/P39KVSoEOnSpXN4e2fPmj0i9+6Bjw8sXgzu7g5vViTWbA4jEydOxGKx0KZNGyIjIwHw8PCgW7dufPbZZ3YvUEQkMTh06BBubm6UKVMGwGl7eQUGQp06cP06lCkDq1ebU3lF4hObxoxERUWxZ88eSpYsiZeXF+fOnQOgQIECpEyZ0mFF2pPGjIiIsx08eBBfX1/c3Nz46aefeOmll5zS7t275pTdI0cgf374+WfIls0pTUsiFS/GjLi7u1O7dm3u3LlDypQpKVmyJCVLlkwwQURExNkOHDiAr68vd+7coWjRouTKlcsp7YaFmcu8HzkCmTObq6wqiEh8ZfMA1hIlSnD+/HlH1CIikqj8+uuv1iDy6quvsmnTJqf0yEZFQatW4O8PqVLBxo2gTdUlPrM5jIwePZp+/fqxbt06rl69yt27d2N8iYgI7N+/n1q1ahEcHEzVqlXZuHGjU2YcGgb07AkrV4KHhzlGpFw5hzcr8lxsXmfEze3/+eXfO/lq114REXMdkddee427d+9SrVo1NmzYQKpUqZzS9ujR5kJmFgssWwbvvuuUZiWJcOmuvf9k7x18Z8yYwYQJEwgMDKR06dJMnz6dihUrPvX8O3fuMGjQIFatWkVQUBB58+Zl6tSp1K1b1651iYjEVYECBShdujQWi4X169c7LYjMnfv/FVWnTVMQkYTDpjBiGAY5cuQgPDycIkWKWJeDj6vly5fTp08fZs+eTaVKlZg6dSp16tTh9OnTZMmS5bHzw8PDqVWrFlmyZGHlypXkzJmTP//80ynz9EVEYitVqlRs2LABi8XCCy+84JQ2V682d+EFGDQIPvjAKc2K2EWsH9NcuHCBBg0acPLkSQBy5crF999/T/ny5ePceKVKlahQoQJffPEFANHR0eTOnZsPPviAjz/++LHzZ8+ezYQJEzh16hQeHh5xalOPaUTEEfbs2cPu3bv56KOPnN72rl1Qu7Y5g6ZjR5g3T6urimO4fGpv//79iYyMZPHixaxcuZJcuXLRpUuXODccHh5unXtvLcbNDV9fX/bu3fvEz/z4449UrlyZHj16kDVrVkqUKMGnn376n+NUwsLCNMhWRBxq9+7dvP7663z88ccsXrzYqW3v3Qv16plBpEEDmD1bQUQSnlg/Z9m9ezcrV66katWqALzyyivkypWL0NDQOHVD3rx5k6ioKLJmzRrjeNasWTl16tQTP3P+/Hm2b99Oy5Yt2bBhA2fPnqV79+5EREQwbNiwJ35m7NixjBgxwub6RERi46effuKNN94gNDSUmjVr8vbbbzut7X37zNVVQ0LMZd6XLYPnfHou4hKx7hm5fv06hQoVsr7Pnj07KVKk4Pr16w4p7Emio6PJkiULc+fOpVy5cjRt2pRBgwYxe/bsp35m4MCBBAcHW78uXbrktHpFJHHbtWuXNYj4+vqydu1apy0C+euv5qOZe/fA2xvWroUUKZzStIjdxTpDWywWQkJCSPGPP+1ubm7cu3cvxqOP2D5DypQpE+7u7ly7di3G8WvXrpHtKcsEZs+eHQ8PD9z/scNTsWLFCAwMJDw8HE9Pz8c+4+XlhZc2YhARO9u5cyd169bl/v371KpVix9++CHG34+OdPCgGUTu3oVq1cwg4qRxsiIOEeueEcMwKFy4MOnTp7d+hYSEULZsWdKnT0+6dOls2gbb09OTcuXK4efnZz0WHR2Nn58flStXfuJnXn31Vc6ePUt0dLT12JkzZ8iePfsTg4iIiCMEBgZSr1497t+/T506dZwaRA4fhlq14M4dePVVWL/eXGVVJCGLdc+IvdcXAejTpw9t27alfPnyVKxYkalTpxIaGkr79u0BaNOmDTlz5mTs2LEAdOvWjS+++IKePXvywQcf8Mcff/Dpp5/y4Ycf2r02EZGnyZYtG+PGjWP9+vWsWrWK5MmTO6Xdo0fB1xdu34ZXXoENG8AJi7qKOFysw0j16tXt3njTpk25ceMGQ4cOJTAwkDJlyrBp0ybroNaAgIAYK77mzp2bzZs307t3b0qVKkXOnDnp2bOnS6bSiUjS82ilaYAePXrQrVu3GH9HOdLx42YQCQqCihVh0ybQ6gSSWNi8HHxCp3VGRCQutm7dyvDhw1m7di0ZMmRwatsnT5qDVG/cgPLlYetW0FqP4gouX2dERCSp2rJlCw0aNODnn3+2PjZ2llOnoEYNM4iULQtbtiiISOKjMCIi8h82b95MgwYNePjwIfXr12f06NFOa/v0aXP9kGvXoHRps0fEhnkCIgmGwoiIyFNs2rSJhg0bEhYWRsOGDVm5cqXTlgr44w+zRyQwEEqWhG3bIGNGpzQt4nRxDiNnz55l8+bNPHjwAD
AHdomIJBYbNmywBpFGjRrx3XffOW0JgXPnzB6RK1egeHHw84NMmZzStIhL2BxGbt26ha+vL4ULF6Zu3bpcvXoVgI4dO9K3b1+7Fygi4mzh4eF8+OGHhIeH89Zbb7F8+XKnBZELF8wgcvkyFCtmBpHMmZ3StIjL2BxGevfuTbJkyQgICIix7HHTpk3ZtGmTXYsTEXEFT09PNm7cSLdu3ZwaRC5eNIPIpUtQpAhs3w7/2r5LJFGyeUulLVu2sHnzZnLlyhXjeKFChfjzzz/tVpiIiLPdunWLjH8PzChUqBAzZ850WtsBAeYYkT//hEKFzCDylJ0xRBIdm3tGQkNDn7gRVFBQkPaAEZEE68cffyR//vxs2LDB6W1fvmwGkQsXoGBB8PeHHDmcXoaIy9gcRqpVq8bChQut7y0WC9HR0YwfPx4fHx+7Fici4gw//PADTZo04d69eyxbtsypbV+7BjVrmoNWX3zRDCI5czq1BBGXs/kxzfjx46lZsyYHDhwgPDycAQMGcOLECYKCgtizZ48jahQRcZjVq1fz7rvvEhkZSbNmzfjqq6+c1nZQkLnp3enTkDu3+WjmX0/ARZIEm3tGSpQowZkzZ6hatSoNGzYkNDSUt99+m8OHD1OgQAFH1Cgi4hCrVq2yBpHmzZuzaNEikiWz+d9ocXL3Lrz+Ohw7Zo4N8fODvHmd0rRIvKO9aUQkSfr+++9p2rQpUVFRtGzZkgULFjgtiISGmkFk925zIbOdO831RETiO0f9DrX5/3m//fbbE49bLBaSJ09Onjx5NJBVROK99evXExUVRatWrViwYAHu7u5OaffhQ2jUyAwiadOaS7wriEhSZ3MYKVOmjHUL7UedKo/eA3h4eNC0aVPmzJlD8uTJ7VSmiIh9zZs3j8qVK9OhQwenBZHwcGjSxFzaPVUq2LTJ3PxOJKmzeczI6tWrKVSoEHPnzuXo0aMcPXqUuXPnUqRIEZYuXcr8+fPZvn07gwcPdkS9IiJx9vPPPxMVFQWAu7s7nTt3dloQiYyEli1h/XpInhzWrYNXXnFK0yLxns09I2PGjGHatGnUqVPHeqxkyZLkypWLIUOGsH//fl544QX69u3LxIkT7VqsiEhcffvtt7Rq1YoWLVo49bEMQHQ0dOgAK1eCpyesWQPVqzuteZF4z+aekWPHjpH3CUO+8+bNy7FjxwDzUc6jPWtERFxt6dKltGrViujoaDw9PWM8WnY0w4Du3WHRInB3h+++g3/8W05EiEMYKVq0KJ999hnh4eHWYxEREXz22WcULVoUgMuXL5NVGyqISDywePFiWrduTXR0NJ06dWLevHm4ucV5w3KbGAb07Qtz5oDFYgaShg2d0rRIgmLzY5oZM2bQoEEDcuXKRalSpQCztyQqKop169YBcP78ebp3727fSkVEbLRo0SLatWtHdHQ0nTt3Zvbs2U4LIgBDh8KUKebrL7+E5s2d1rRIghKndUbu3bvHkiVLOHPmDABFihShRYsWpE6d2u4F2pvWGRFJGhYuXEi7du0wDIP33nuPWbNmOTWIjB0Ln3xivv7iC+jRw2lNizhMvFlnBCB16tR07drVbkWIiNhbpkyZ8PDwoEOHDsyYMcOpQWTatP8HkXHjFEREniXOyw2ePHmSgICAGGNHABo0aPDcRYmIPK+6devy66+/UqJECacGkXnzoFcv8/WwYTBggNOaFkmwbA4j58+f56233uLYsWNYLJbHFj57NIdfRMTZlixZQsWKFSlUqBCAdVybsyxeDF26mK/79TPDiIg8m83/XOjZsyf58+fn+vXrpEyZkhMnTrBr1y7Kly/Pjh07HFCiiMizzZs3j1atWuHj48O1a9ec3v7y5dCu3f+n8o4fb86gEZFnszmM7N27l5EjR5IpUybc3Nxwc3OjatWqjB07lg8//NARNYqI/Ke5c+fy3nvvAdC4cWOyZMnitLajo2HIEGjWDKKizEAyfbqCiIgtbA4jUVFR1lkzmTJl4sqVK4C56Nnp06ftW52IyDPMmTOHLn8/G+nZsydTp0512qJmwcHmuiGjR5vve/Uyp/A6cYiKSKJg85iREiVKcPToUfLnz0+lSpUYP348np6ezJ07lxdffNERNYqIPNGsWbOsaxr17t2bSZMmOS2InDpl7r57+rS518zcudC6tVOaFkl0bA4jgwcPJjQ0FICRI0fy5ptvUq1aNTJmzMjy5cvtXqCIyJMsX77cGkT69u3LhAkTnBZE1q41N727dw9y54bVq6FcOac0LZIoxWnRs38LCgoiffr0Tt3vIa606JlI4hAUFISvry81a9Zk/PjxTvn7JzoaxowxV1YFeO01WLECnDhERcSl4sWiZxEREaRIkYIjR45QokQJ6/EMGTLYrSARkdjIkCEDP/30EylTpnRKELl3zxycumqV+b5HD3Opdw8PhzctkujZFEY8PDzIkyeP1hIREZf4/PPPcXd3p8ffS5q+8MILTmn37FlzfMiJE+DpCTNnQseOTmlaJEmwecz3oEGD+OSTTwgKCnJEPSIiTzRlyhR69uzJ+++/zy+//OK0djdvhgoVzCCSPTvs3KkgImJvNg9g/eKLLzh79iw5cuQgb968j/3L5NChQ3YrTkQEYPLkyfTt2xcw/0FUqVIlh7dpGDBhAgwcaI4VqVwZvv/eDCQiYl82h5FGjRo5oAwRkSebNGkS/fr1A2DIkCGMGDHC4WNEQkPN3o9HEwQ7dTJ33vXycmizIkmWXWbTJCSaTSOScEyYMIEBf+80N2zYMIYPH+7wNi9eNMeHHD0KyZKZq6l26aIVVUXAcb9D47RO4J07d/jyyy8ZOHCgdezIoUOHuHz5st0KE5Gkbe/evdYgMnz4cKcEke3boXx5M4hkyWK+79pVQUTE0Wx+TPPbb7/h6+tL2rRpuXjxIp07dyZDhgysWrWKgIAAFi5c6Ig6RSSJqVy5MsOHD8disTD00cIeDmIYMGkSfPyxub9M+fLmFN7cuR3arIj8zebHNL6+vrz88suMHz+e1KlTc/ToUV588UV+/vlnWrRowcWLFx1Uqn3oMY1I/BYREYGHExfvCA6G9u3NVVQB2raF2bPNJd5FJKZ485jm119/tW5K9U85c+YkMDDQLkWJSNI0evRofH19CQkJcUp7x4+b03ZXrzbXD5k9G77+WkFExNlsDiNeXl7cvXv3seNnzpwhc+bMdilKRJKeUaNGMWTIEHbt2sWaNWsc3t6SJVCpEvzxB+TJA7t3a6CqiKvYHEYaNGjAyJEjiYiIAMBisRAQEMBHH31E48aN7V6giCR+I0aMsI4LGTt2LK1atXJYW2Fh5lLurVrB/ftQuzYcPGj2kIiIa9gcRiZNmkRISAhZsmThwYMHVK9enYIFC5I6dWrGjBnjiBpFJBH750yZcePG8fHHHzusrUuXoHp1czl3MDe827ABMmVyWJMiEgs2z6ZJmzYtW7duZffu3fz222+EhITw8ssv4+vr64j6RCSRMgyD4cOHM3LkSMBcU+TR4maOsG0bNG8ON29C+vSweDHUreuw5kTEBjbPprl06RK5E/B8N
82mEYkfAgMDKVGiBLdu3WLixInW5d7tLToaxo6FIUPMKbwvvwwrV0L+/A5pTiRRizezafLly0f16tWZN28et2/ftlshIpK0ZMuWDT8/P2bMmOGwIHL7NjRoAIMHm0GkUyfYs0dBRCS+sTmMHDhwgIoVKzJy5EiyZ89Oo0aNWLlyJWFhYY6oT0QSEcMwuHDhgvV96dKl6d69u0PaOnwYypWD9evNPWXmz4d58zRtVyQ+sjmMlC1blgkTJhAQEMDGjRvJnDkz7733HlmzZqVDhw6OqFFEEgHDMBg4cCAlS5Zk9+7dDm3rq6/MXXYvXDB7QfbuBf31JBJ/xWlvGjCn9Pr4+DBv3jy2bdtG/vz5+eabb+xZm4gkEoZh8NFHHzFu3DhCQ0M5duyYQ9p5+BA6dzZ33A0Lg3r1zGm7Zcs6pDkRsZM4h5G//vqL8ePHU6ZMGSpWrEiqVKmYMWOGPWsTkUTAMAwGDBjAhAkTAPjiiy/o1q2b3dsJDTXXDPnyS3PhstGj4ccfzZkzIhK/2Ty1d86cOSxdupQ9e/ZQtGhRWrZsyQ8//EDevHkdUZ+IJGCGYdCvXz8mT54MwMyZMx0SRB4+hEaN4KefIG1aWLECatWyezMi4iA2h5HRo0fTvHlzPv/8c0qXLu2ImkQkETAMgz59+jB16lQAZs2aRdeuXe3eTkQEvPuuuY7ICy/Axo3meBERSThsDiMBAQFYtHmDiDxDZGQk586dA8we1ffee88BbUDLlrB2rTlLZu1aBRGRhMjmMGKxWLhz5w7z58/n999/B+Cll16iY8eOpE2b1u4FikjC5OHhwYoVK9i+fTtvvPGG3a8fHW0OVF2xAjw8zJ13fXzs3oyIOEGc1hkpUKAAU6ZMISgoiKCgIKZMmUKBAgU4dOiQI2oUkQTCMAy+//57Hi3s7OXl5ZAgYhjmZncLF4K7OyxfDq+/bvdmRMRJbA4jvXv3pkGDBly8eJFVq1axatUqLly4wJtvvkmvXr0cUKKIJASGYfD+++/TpEkT+vTp48B2oF8/mD3bnDWzcCG89ZbDmhMRJ7D5Mc2BAweYN28eyZL9/6PJkiVjwIABlC9f3q7FiUjCEB0dzfvvv8+sWbOwWCyUKlXKYW0NGwZ/T85h3jxo0cJhTYmIk9jcM5ImTRoCAgIeO37p0iVSp05tl6JEJOGIjo6me/fu1iDy1Vdf0b59e4e09dlnMGqU+frzz80xIyKS8NkcRpo2bUrHjh1Zvnw5ly5d4tKlSyxbtoxOnTrRvHlzR9QoIvFUdHQ03bp1Y86cOVgsFhYsWEC7du0c0tbnn8PAgebrzz6DDz5wSDMi4gI2P6aZOHEiFouFNm3aEBkZCZij5rt168Znn31m9wJFJP7q0aMHc+fOxWKx8M0339C6dWuHtDN/PvTsab4eMgQ++sghzYiIi1iMR8PebXT//n3rGgIFChQgZcqUdi3MUe7evUvatGkJDg4mTZo0ri5HJEFbunQp7dq146uvvqJVq1YOagNatTIHrvbtCxMmmANXRcT5HPU7NNZhJCoqihMnTlCoUCFSpEgR43sPHjzgjz/+oESJEri5xXm7G6dQGBGxr4CAAPLkyeOQa69eDe+8A1FR0K0bzJihICLiSo76HRrr5LBo0SI6dOiAp6fnY9/z8PCgQ4cOLF261G6FiUj8ExUVxeDBg7l8+bL1mKOCyMaN0LSpGUTatoUvvlAQEUmsYh1G5s+fT79+/XB3d3/se4+m9s6dO9euxYlI/BEVFUXHjh0ZM2YMtWvXJiIiwmFt+fvD22//f9+Z+fMhnne6ishziPUA1tOnT/PKK6889fsVKlSwLg8vIolLVFQUHTp0YOHChbi7uzN8+HA8PDwc0tbPP0P9+uZOvPXrw+LF5iqrIpJ4xfrfGqGhody9e/ep37937x7379+3S1EiEn9ERUXRrl07axBZtmwZ77zzjkPaWrMG3ngDQkOhVi347jtz3xkRSdxiHUYKFSrEzz///NTv7969m0KFCtmlKBGJH6Kiomjbti2LFy8mWbJkLF++nCZNmti9ndBQ6NLFXNb97l2oXt0MJsmT270pEYmHYh1GWrRoweDBg/ntt98e+97Ro0cZOnQoLbQus0iiMmjQIJYsWWINIo0bN7Z7G4cPQ7lyMHeuOUB1wADYsgUSyGoBImIHsZ7aGxERQe3atdm9eze+vr4ULVoUgFOnTrFt2zZeffVVtm7d6rDnyPaiqb0isXflyhVq167NqFGjeMvOu9FFR5t7zHzyiTlQNUcOWLQIatSwazMiYkcuX2cEzEAyZcoUli5dyh9//IFhGBQuXJgWLVrQq1evJ077jW8URkT+m2EYWP4xhzYyMjLGxpj2cOWKOV132zbz/VtvmZveZcxo12ZExM7iRRhJDBRGRJ4uIiKCNm3aUL9+fYc9dv3hB3ODu1u3zEcxU6dCp05aQ0QkIXD5omcikrhFRETQvHlz68aXgYGBdr3+/fvQtSs0amQGkbJl4eBB6NxZQUQkqVMYEREiIiJo1qwZ33//PZ6enqxYsYJs2bLZ7fqPBqnOmWO+79cP9u6Fv4eeiUgSZ98HwSKS4ISHh9OsWTNWr16Nl5cXq1ev5o033rDLtaOjzccwH39sDlLNnh0WLgRfX7tcXkQSCYURkSQsPDycd999lx9++AEvLy/WrFnD66+/bpdrX70K7dqZ03QBGjaEL7+ETJnscnkRSUT0mEYkCVu8eLE1iPzwww92CyJr10KpUmYQSZECZs0yd+BVEBGRJ4lVz0ifPn1ifcHJkyfHuRgRca727dvz+++/U6tWLWrXrv3c1zMMGDgQxo0z35cuDd9+C8WKPfelRSQRi1UYOXz4cIz3hw4dIjIykiJFigBw5swZ3N3dKVeunP0rFBG7CgsLA8DLywuLxcKECRPscl3DMFdPnTjRfN+nD3z6KXh52eXyIpKIxSqM+Pv7W19PnjyZ1KlT880335A+fXoAbt++Tfv27alWrZpjqhQRu3j48CGNGzfGzc2NlStX4mWnpGAY5kqqj4LIzJnQrZtdLi0iSYDNi57lzJmTLVu2ULx48RjHjx8/Tu3atbly5YpdC7Q3LXomSdXDhw9566232LRpEylSpGD37t28/PLLz31dw4AhQ2DMGPP9F19Ajx7PfVkRiYcc9TvU5tk0d+/e5caNG48dv3HjBvfu3bNLUSJiXw8fPqRRo0Zs3ryZFClSsH79ersEEYARI/4fRKZOVRAREdvZPJvmrbfeon379qxatYq//vqLv/76i++//56OHTvy9ttvO6JGEXkODx48oGHDhmzevJmUKVOyYcMGfHx87HLtUaPMMAIwaRL07GmXy4pIEmNzGJk9ezZvvPEGLVq0IG/evOTNm5cWLVrw+uuvM3PmzDgVMWPGDPLly0fy5MmpVKkS+/fvj9Xnli1bhsVioVGjRnFqVySxexREtmzZYg0i3t7edrn2p5/C0KHm6/HjzQGrIiJxEeeN8kJDQzl37hwABQoU4IUXXohTAcuXL6dNmzbMnj2bSpUqMXXqVFasWMHp06fJkiXLUz938eJF
[... base64-encoded PNG image data omitted ...]\n", "text/plain": [ "
" ] @@ -915,7 +976,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "id": "bc5e4c30", "metadata": {}, "outputs": [ @@ -923,10 +984,10 @@ "name": "stdout", "output_type": "stream", "text": [ - "The `RestrictedPrior` rejected 1.4%ined: 1204\n", + "The `RestrictedPrior` rejected 3.7%ined: 996\n", " of prior samples. You will get a speed-up of\n", - " 1.4%.\n", - " Neural network successfully converged after 125 epochs." + " 3.9%.\n", + " Neural network successfully converged after 190 epochs." ] } ], diff --git a/tutorials/19_flowmatching_and_scorematching.ipynb b/tutorials/19_flowmatching_and_scorematching.ipynb new file mode 100644 index 000000000..d735b214a --- /dev/null +++ b/tutorials/19_flowmatching_and_scorematching.ipynb @@ -0,0 +1,338 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Flow-Matching Posterior Estimation (FMPE) and Neural Posterior Score Estimation (NPSE)\n", + "\n", + "`sbi` also incorporates recent algorithms based on Flow Matching and Score Matching generative models, which are also referred to as Continuous Normalizing Flows (CNF) and Denoising Diffusion Probabilistic Models (DDPM), respectively.\n", + "\n", + "At the highest level, you can conceptualize FMPE and NPSE as tackling the exact same problem as (S)NPE, i.e., estimating the posterior from simulations, but replacing Normalizing Flows with different conditional density estimators. \n", + "\n", + "Flow Matching and Score Matching, as generative models, are also quite similar to Normalizing Flows, where a deep neural network parameterizes the transformation from a base distribution (e.g., Gaussian) to a more complex one that approximates the target density, but they differ in what this transformation looks like (more on that below). \n", + "\n", + "Beyond that, Flow Matching and Score Matching offer different benefits and drawbacks compared to Normalizing Flows, which make them better (or worse) choices for some problems. For examples, Score Matching (Diffusion Models) are known to be very flexible and can model high-dimensional distributions, but are comparatively slow during sampling.\n", + "\n", + "In this tutorial, we take a brief look at the API for `FMPE` and `NPSE`, their pros and cons, as well as highlight some notable options.\n", + "\n", + "For more information, see:\n", + "\n", + "**Score Matching**:\n", + "- Hyvärinen, A. \"Estimation of Non-Normalized Statistical Models by Score Matching.\" JMLR 2005.\n", + "- Song, Y., et al. \"Score-Based Generative Modeling through Stochastic Differential Equations.\" ICLR 2021.\n", + "- Geffner, T., Papamakarios, G., and Mnih, A. \"Score modeling for simulation-based inference.\" NeurIPS 2022 Workshop on Score-Based Methods. 2022.\n", + "- Sharrock, L., Simons, J., et al. \"Sequential neural score estimation: Likelihood-free inference with conditional score based diffusion models.\" ICML 2024.\n", + "\n", + "**Flow Matching**:\n", + "- Lipman, Y., et al. \"Flow Matching for Generative Modeling.\" ICLR 2023\n", + "- Wildberger, J.B., Buchholz, S., et al. \"Flow Matching for Scalable Simulation-Based Inference.\" NeurIPS 2023." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING (pytensor.tensor.blas): Using NumPy C-API based implementation for BLAS functions.\n" + ] + } + ], + "source": [ + "import torch\n", + "\n", + "from sbi.inference import NPSE\n", + "from sbi.utils import BoxUniform\n", + "from sbi import analysis as analysis" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Example toy simulator\n", + "# Define the prior\n", + "num_dims = 3\n", + "num_sims = 5000\n", + "prior = BoxUniform(low=-torch.ones(num_dims), high=torch.ones(num_dims))\n", + "def simulator(theta):\n", + " # linear gaussian\n", + " return theta + 1.0 + torch.randn_like(theta) * 0.1\n", + "\n", + "# Produce simulations\n", + "theta = prior.sample((num_sims,))\n", + "x = simulator(theta)\n", + "\n", + "theta_o = torch.zeros(num_dims)\n", + "x_o = simulator(theta_o)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## FMPE\n", + "\n", + "Flow-Matching Posterior Estimation (FMPE) is an approach to Simulation-Based Inference\n", + "(SBI) that leverages Flow Matching, a generative modeling technique where the\n", + "transformation from a simple base distribution (like a Gaussian) to the target\n", + "distribution is learned through matching the flow of probability densities.\n", + "\n", + "### Key Concept:\n", + "- **Flow Matching**: The core idea is to model the probability flow between the base\n", + " distribution and the target distribution by minimizing a discrepancy between their\n", + " \"flows\" or \"dynamics\" in the latent space. This is typically done by training a neural\n", + " network to parameterize a vector field that defines how samples should be moved or\n", + " transformed in order to follow the target distribution.\n", + "\n", + "### Step-by-Step Process:\n", + "1. **Base Distribution**: Start with a simple base distribution (e.g., Gaussian).\n", + "2. **Neural Network Parameterization**: Use a neural network to learn a vector field\n", + " that describes the flow from the base distribution to the target distribution.\n", + "3. **Flow Matching Objective**: Optimize the neural network to minimize a loss function\n", + " that captures the difference between the flow of the base distribution and the target\n", + " distribution.\n", + "4. **Sampling**: Once trained, draw samples from the base distribution and apply the\n", + " learned flow transformation to obtain samples from the approximate posterior\n", + " distribution.\n", + "\n", + "FMPE can be more efficient than traditional normalizing flows in some settings,\n", + "especially when the target distribution has complex structures or when high-dimensional\n", + "data is involved (see Dax et al., 2023, https://arxiv.org/abs/2305.17161 for an\n", + "example). However, compared to (discrete time) normalizing flows, flow matching is\n", + "usually slower at inference time because sampling and evaluation of the target\n", + "distribution requires solving the underlying ODE (compared to just doing a NN forward\n", + "pass for normalizing flows). \n", + "\n", + "In the next cell, we'll show how to use FMPE using the `sbi` package.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Neural network successfully converged after 61 epochs." 
+ ] + } + ], + "source": [ + "from sbi.inference import FMPE\n", + "from sbi.neural_nets import flowmatching_nn\n", + "\n", + "# the quick way\n", + "trainer = FMPE(prior)\n", + "trainer.append_simulations(theta, x).train()\n", + "posterior = trainer.build_posterior()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Neural network successfully converged after 125 epochs." + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "241f54dbad2e4f219c8fe01b4a76748b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Drawing 10000 posterior samples: 0%| | 0/10000 [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# plot posterior samples\n", + "fig, ax = analysis.pairplot(\n", + " posterior_samples, limits=[[-2, 2], [-2, 2], [-2, 2]], figsize=(5, 5),\n", + " labels=[r\"$\\theta_1$\", r\"$\\theta_2$\", r\"$\\theta_3$\"],\n", + " points=theta_o # add ground truth thetas\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# NPSE\n", + "NPSE approximates the posterior distribution by learning its score function, i.e., gradient of the log-density, using the denoising score matching loss. The class of generative models is referred to as score-based generative models, with close links to diffusion models.\n", + "\n", + "- Score-based generative models have been shown to scale well to very high dimensions (e.g., high-resolutions images), which is particularly useful when the parameter space (and hence, the target posterior) is high-dimensional.\n", + "- On the other hand, sampling can be slower as it involves solving many steps of the stochastic differential equation for reversing the diffusion process.\n", + "\n", + "Note that only the single-round version of NPSE is implemented currently.\n", + "\n", + "For more details on score-based generative models, see [Song et al., 2020](https://arxiv.org/abs/2011.13456) (in particular, Figure 1 and 2)." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# Instantiate NPSE and append simulations\n", + "inference = NPSE(prior=prior, sde_type=\"ve\")\n", + "inference.append_simulations(theta, x);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note the argument `sde_type`, which defines whether the forward diffusion process has a noising schedule that is Variance Exploding (`ve`, i.e., [SMLD](https://proceedings.neurips.cc/paper/2019/hash/3001ef257407d5a371a96dcd947c7d93-Abstract.html?ref=https://githubhelp.com)), Variance Preserving (`vp`, i.e., [DDPM](https://proceedings.neurips.cc/paper/2020/hash/4c5bcfec8584af0d967f1ab10179ca4b-Abstract.html)), or sub-Variance Preserving (`subvp`) in the limit." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Neural network successfully converged after 365 epochs." 
+ ] + } + ], + "source": [ + "# Train the score estimator\n", + "score_estimator = inference.train()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "53bd342cc6cc4a9ab4a8639091747471", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Drawing 10000 posterior samples: 0%| | 0/499 [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# plot posterior samples\n", + "fig, ax = analysis.pairplot(\n", + " posterior_samples, limits=[[-2, 2], [-2, 2], [-2, 2]], figsize=(5, 5),\n", + " labels=[r\"$\\theta_1$\", r\"$\\theta_2$\", r\"$\\theta_3$\"],\n", + " points=theta_o # add ground truth thetas\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}
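The FMPE part of the new tutorial trains the estimator with trainer.append_simulations(theta, x).train(), but the accompanying markdown describes the flow matching objective only in words. The following is a minimal, self-contained PyTorch sketch of a conditional flow matching loss for a vector field conditioned on the observation; the ConditionalVectorField module, the straight-line interpolation path, and all names are assumptions made for illustration and are not sbi's internal implementation.

import torch
import torch.nn as nn


class ConditionalVectorField(nn.Module):
    """Small MLP taking (interpolated theta, observation x, time t) -> velocity."""

    def __init__(self, theta_dim: int, x_dim: int, hidden: int = 64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(theta_dim + x_dim + 1, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, theta_dim),
        )

    def forward(self, theta_t, x, t):
        return self.net(torch.cat([theta_t, x, t], dim=-1))


def cfm_loss(vector_field, theta, x):
    """Conditional flow matching loss with a straight-line path from N(0, I) to the data.

    theta_t = (1 - t) * theta_0 + t * theta, whose velocity is (theta - theta_0);
    the network is regressed onto this target velocity.
    """
    theta_0 = torch.randn_like(theta)        # sample from the Gaussian base distribution
    t = torch.rand(theta.shape[0], 1)        # t ~ U(0, 1), one time per sample
    theta_t = (1 - t) * theta_0 + t * theta  # point on the interpolation path
    target = theta - theta_0                 # constant velocity along the path
    pred = vector_field(theta_t, x, t)
    return ((pred - target) ** 2).sum(dim=-1).mean()


# Usage with the tutorial's toy example (3-dimensional theta and x):
#   vf = ConditionalVectorField(theta_dim=3, x_dim=3)
#   loss = cfm_loss(vf, theta, x)
#   loss.backward()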
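Analogously, the NPSE section states that the posterior score is learned with a denoising score matching loss but does not spell that objective out. Below is a minimal, self-contained PyTorch sketch of the objective for a variance-exploding noise schedule (matching the sde_type="ve" choice used in the tutorial); the ConditionalScoreNet module, the schedule constants sigma_min/sigma_max, and all names are illustrative assumptions, not the internals of sbi's score estimator.

import torch
import torch.nn as nn


class ConditionalScoreNet(nn.Module):
    """Small MLP taking (noisy theta, observation x, time t) -> estimated score."""

    def __init__(self, theta_dim: int, x_dim: int, hidden: int = 64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(theta_dim + x_dim + 1, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, theta_dim),
        )

    def forward(self, theta_t, x, t):
        return self.net(torch.cat([theta_t, x, t], dim=-1))


def dsm_loss(score_net, theta, x, sigma_min=0.01, sigma_max=10.0):
    """Denoising score matching loss for a variance-exploding (VE) schedule.

    theta_t = theta + sigma(t) * eps, so the score of the perturbation kernel is
    -eps / sigma(t); the squared error is weighted by sigma(t)^2.
    """
    t = torch.rand(theta.shape[0], 1)                 # t ~ U(0, 1), one time per sample
    sigma = sigma_min * (sigma_max / sigma_min) ** t  # VE noise level sigma(t)
    eps = torch.randn_like(theta)
    theta_t = theta + sigma * eps                     # noised parameters
    score = score_net(theta_t, x, t)                  # estimate of grad log p_t(theta_t | x)
    return ((sigma * score + eps) ** 2).sum(dim=-1).mean()


# Usage with the tutorial's toy example (3-dimensional theta and x):
#   net = ConditionalScoreNet(theta_dim=3, x_dim=3)
#   loss = dsm_loss(net, theta, x)
#   loss.backward()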