From 2763b5d76698cd59071f3a8251c88eb3a32ce33f Mon Sep 17 00:00:00 2001 From: zhaohu xing <32668889+920232796@users.noreply.github.com> Date: Fri, 1 Jul 2022 15:24:41 +0800 Subject: [PATCH 01/21] Opt 30b (#16) * clean codes Co-authored-by: Zac Liu --- examples/opt/generate_opt_30b.py | 2 -- examples/opt/opt_30b_en_mutigpu.py | 18 ++++++++++----- flagai/model/gpt2_model.py | 18 +-------------- flagai/model/opt_model.py | 37 ------------------------------ flagai/mp_tools.py | 8 ++++--- 5 files changed, 18 insertions(+), 65 deletions(-) diff --git a/examples/opt/generate_opt_30b.py b/examples/opt/generate_opt_30b.py index 9d3e1d1d..e06e39da 100644 --- a/examples/opt/generate_opt_30b.py +++ b/examples/opt/generate_opt_30b.py @@ -1,7 +1,6 @@ from flagai.model.predictor.predictor import Predictor from flagai.auto_model.auto_loader import AutoLoader import torch -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") loader = AutoLoader(task_name="lm", model_name="opt-30b-en") @@ -9,7 +8,6 @@ model = loader.get_model() tokenizer = loader.get_tokenizer() model.eval() -model.to(device) text = "The trophy doesn’t fit in the suitcase because " predictor = Predictor(model, tokenizer) diff --git a/examples/opt/opt_30b_en_mutigpu.py b/examples/opt/opt_30b_en_mutigpu.py index 0135b6a5..ac2a4a1f 100644 --- a/examples/opt/opt_30b_en_mutigpu.py +++ b/examples/opt/opt_30b_en_mutigpu.py @@ -1,4 +1,4 @@ -# os.environ["CUDA_VISIBLE_DEVICES"] = "0,2" + import torch import os import argparse @@ -7,8 +7,11 @@ import random import numpy as np from flagai.model.predictor.predictor import Predictor +import glob +import time + +# run script : python -m torch.distributed.launch --nproc_per_node=4 --nnodes=1 opt_30b_en_mutigpu.py -# run script : python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 glm_blank_filling_QA_ch_mutigpu.py os.environ["ENV_TYPE"] = "deepspeed+mpu" model_parallel_size = 4 world_size = 4 @@ -58,11 +61,15 @@ def initialize_distributed(): set_random_seed(123) -loader = AutoLoader("lm", model_name="opt-350m-en") + +print(f"building model...") +loader = AutoLoader("lm", model_name="opt-30b-en") model = loader.get_model() -model.half() tokenizer = loader.get_tokenizer() -# model.parallel_output = False +model.half() + +model.parallel_output = False + model.eval() model.to(device) @@ -75,4 +82,3 @@ def initialize_distributed(): if mpu.get_model_parallel_rank() == 0: print(f"pred is {out}") - diff --git a/flagai/model/gpt2_model.py b/flagai/model/gpt2_model.py index 8183f2fa..427c4be6 100644 --- a/flagai/model/gpt2_model.py +++ b/flagai/model/gpt2_model.py @@ -11,12 +11,9 @@ import torch.nn.functional as F if os.getenv('ENV_TYPE') == 'deepspeed+mpu': - from flagai.mpu import get_model_parallel_world_size - from flagai.mpu import get_cuda_rng_tracker from flagai.mpu.utils import divide -if os.getenv('ENV_TYPE') == 'deepspeed+mpu': from flagai.mpu.random import checkpoint - from flagai.mpu import copy_to_model_parallel_region, gather_from_model_parallel_region + from flagai.mpu import copy_to_model_parallel_region, gather_from_model_parallel_region, get_model_parallel_world_size, get_cuda_rng_tracker from flagai.mpu.cross_entropy import vocab_parallel_cross_entropy elif os.getenv('ENV_TYPE') == 'deepspeed': @@ -321,19 +318,6 @@ def forward( None, } - # lm_logits = self.lm_head(hidden_states) - # return_data = {"logits": lm_logits} - # if labels is not None: - # # Shift so that tokens < n predict n - # shift_logits = lm_logits[..., :-1, :].contiguous() - # shift_labels = 
labels[..., 1:].contiguous() - # loss_fct = nn.CrossEntropyLoss() - # loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), - # shift_labels.view(-1)) - # return_data["loss"] = loss - - # return return_data - def load_weights(self, checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu")) diff --git a/flagai/model/opt_model.py b/flagai/model/opt_model.py index 3140fda0..ef89695d 100644 --- a/flagai/model/opt_model.py +++ b/flagai/model/opt_model.py @@ -265,43 +265,6 @@ def __init__(self, config, **kwargs): # self.config = config self.transformer = OPTStack(self.config) - # def forward( - # self, - # **data, - # ): - # input_ids = data.get("input_ids", None) - # # attention_mask = data.get("attention_mask", None) - # # position_ids = data.get("position_ids", None) - # labels = data.get("labels", None) - # use_cache = data.get("use_cache", None) - # output_attentions = data.get("output_attentions", None) - # output_hidden_states = data.get("output_hidden_states", True) - # - # transformer_outputs = self.transformer( - # input_ids, - # attention_mask=None, - # position_ids=None, - # use_cache=use_cache, - # output_attentions=output_attentions, - # output_hidden_states=output_hidden_states, - # ) - # hidden_states = transformer_outputs - # - # lm_logits = self.lm_head(hidden_states) - # - # return_data = {"logits": lm_logits} - # if labels is not None: - # # Shift so that tokens < n predict n - # shift_logits = lm_logits[..., :-1, :].contiguous() - # shift_labels = labels[..., 1:].contiguous() - # loss_fct = nn.CrossEntropyLoss() - # loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), - # shift_labels.view(-1)) - # return_data["loss"] = loss - # - # return return_data - - def load_weights(self, checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu")) diff --git a/flagai/mp_tools.py b/flagai/mp_tools.py index a82b9e05..5f172541 100644 --- a/flagai/mp_tools.py +++ b/flagai/mp_tools.py @@ -219,8 +219,6 @@ def change_pytorch_model_mp_from_1_to_n_new(model_name_brief, checkpoint: str, t start = ratio * i end = ratio * (i + 1) d = torch.load(filenames[i], map_location='cpu') - if d.get("module", None) is None: - d["module"] = d for j in range(start, end): d_new = {} @@ -235,7 +233,11 @@ def change_pytorch_model_mp_from_1_to_n_new(model_name_brief, checkpoint: str, t d_new[k] = None d_new['module'] = {} with torch.no_grad(): - for k, v in d['module'].items(): + + if "module" in d: + d = d["module"] + + for k, v in d.items(): assert len(v.shape) < 3 flag = 0 for keys in trans_keys: From 3e5290715ea736d027cefb2bdd71858b3badc48e Mon Sep 17 00:00:00 2001 From: Zhaodong Yan <94831503+Anhforth@users.noreply.github.com> Date: Fri, 1 Jul 2022 23:43:28 +0800 Subject: [PATCH 02/21] fix bert tokenizer issue (#18) * fix bert tokenizer issue * updated t5, opt and roberta tokenizers * fixed doc 404 error Signed-off-by: ZhaodongYan1 --- README.md | 2 +- README_zh.md | 2 +- flagai/data/dataset/data_utils.py | 2 - flagai/data/tokenizer/bert/bert_tokenizer.py | 56 +++++++--- flagai/data/tokenizer/bert/wordpiece.py | 1 - flagai/data/tokenizer/opt/opt_en_tokenizer.py | 88 ++++++++++++++- .../tokenizer/roberta/roberta_tokenizer.py | 89 +++++++++++++++ flagai/data/tokenizer/t5/t5_tokenizer.py | 102 +++++++++++++++--- tests/bak_test_superglue.py | 32 +++++- 9 files changed, 333 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 56affbb6..1d2990b0 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ FlagAI (Fast 
LArge-scale General AI models) is an fast, easy-to-use and extensib * Now it supports **WuDao GLM** with a maximum of 10 billion parameters (see [Introduction to GLM](/docs/GLM.md)). It also supports **BERT**, **RoBERTa**, **GPT2**, **T5**, and models from Huggingface Transformers. -* It provides APIs to quickly download and use those pre-trained models on a given text, fine-tune them on widely-used datasets collected from [SuperGLUE](https://super.gluebenchmark.com/) and [CLUE](https://github.com/CLUEbenchmark/CLUE) benchmarks, and then share them with the community on our model hub. It also provides [prompt-learning](/docs/TUTORIAL_7_PROMPT_LERANING.md) toolkit for few shot tasks. +* It provides APIs to quickly download and use those pre-trained models on a given text, fine-tune them on widely-used datasets collected from [SuperGLUE](https://super.gluebenchmark.com/) and [CLUE](https://github.com/CLUEbenchmark/CLUE) benchmarks, and then share them with the community on our model hub. It also provides [prompt-learning](/docs/TUTORIAL_7_PROMPT_LEARNING.md) toolkit for few shot tasks. * These models can be applied to (Chinese/English) Text, for tasks like text classification, information extraction, question answering, summarization, and text generation. diff --git a/README_zh.md b/README_zh.md index ba605c5c..88def64d 100644 --- a/README_zh.md +++ b/README_zh.md @@ -207,7 +207,7 @@ for text_pair in test_data: * [Tutorial 4: 为模型和数据并行训练定制训练器](/doc_zh/TUTORIAL_4_TRAINER.md) * [Tutorial 5: 使用 Autoloader 简化模型和分词器初始化过程](/doc_zh/TUTORIAL_5_INSTRUCTIONS_FOR_AutoLoader.md) * [Tutorial 6: 将现成的推理算法与 Predictor 结合使用](/doc_zh/TUTORIAL_6_INSTRUCTIONS_FOR_PREDICTOR.md) -* [Tutorial 7: 使用飞智提示学习工具包来提高在SuperGLUE任务上的表现](/doc_zh/TUTORIAL_7_PROMPT_LERANING.md) +* [Tutorial 7: 使用飞智提示学习工具包来提高在SuperGLUE任务上的表现](/doc_zh/TUTORIAL_7_PROMPT_LEARNING.md) * [Tutorial 8: 多机训练模型搭建环境](/doc_zh/TUTORIAL_8_ENVIRONMENT_SETUP.md) * [Tutorial 9: 使用encoder/decoder/encoder-decoder模型进行文本生成](/doc_zh/TUTORIAL_9_SEQ2SEQ_METHOD.md) diff --git a/flagai/data/dataset/data_utils.py b/flagai/data/dataset/data_utils.py index 7520afeb..98f65720 100644 --- a/flagai/data/dataset/data_utils.py +++ b/flagai/data/dataset/data_utils.py @@ -120,8 +120,6 @@ def num_special_tokens_to_add(text_a_ids, return num_tokens -# -# def build_input_from_ids(text_a_ids, text_b_ids, answer_ids, diff --git a/flagai/data/tokenizer/bert/bert_tokenizer.py b/flagai/data/tokenizer/bert/bert_tokenizer.py index ae758da8..0ba3fdf6 100644 --- a/flagai/data/tokenizer/bert/bert_tokenizer.py +++ b/flagai/data/tokenizer/bert/bert_tokenizer.py @@ -38,7 +38,7 @@ from ..tokenizer import Tokenizer from .wordpiece import BertTokenizer - +from ..tokenizer import CommandToken class BertWordPieceTokenizer(Tokenizer): """ @@ -70,9 +70,37 @@ def __init__(self, tokenizer_model_type=None, cache_dir=None): # # parse tokens and vocabs from tokenizer self._tokens = list(self.text_tokenizer.vocab.keys()) self._vocab = {k: v for k, v in self.text_tokenizer.vocab.items()} - - # self._text_tokens = list(self._tokens) - # self._text_token_vocab = {k: v for k, v in self.text_tokenizer.vocab.items()} + self.num_tokens = len(self._tokens) + + self._command_tokens = [ + CommandToken('pad', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + CommandToken('ENC', '[CLS]', self.get_specialid_from_text_tokenizer('cls')), + CommandToken('MASK', '[MASK]', + self.get_specialid_from_text_tokenizer('mask')), + CommandToken('unk', '[UNK]', self.get_specialid_from_text_tokenizer('unk')), + 
CommandToken('sep', '[SEP]', self.get_specialid_from_text_tokenizer('sep')), + CommandToken('eos', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + ] + self._command_tokens.extend([ + CommandToken('sop', '<|startofpiece|>', self.num_tokens), + CommandToken('eop', '<|endofpiece|>', self.num_tokens + 1) + ]) + self.num_tokens += 2 + + self.command_name_map = {tok.name: tok for tok in self._command_tokens} + self.command_token_map = { + tok.token: tok + for tok in self._command_tokens + } + self.command_id_map = {tok.Id: tok for tok in self._command_tokens} + + + def get_specialid_from_text_tokenizer(self, token): + return self.text_tokenizer.vocab[getattr(self.text_tokenizer, "_token_" + str(token))] + + def get_command(self, name): + """get command token corresponding to `name`""" + return self.command_name_map[name] def _encode(self, text): tokens = self.text_tokenizer.tokenize(text) @@ -119,12 +147,6 @@ def decode( sub_texts = [] current_sub_text = [] for token in filtered_tokens: - # if token in self.added_tokens_encoder: - # if current_sub_text: - # sub_texts.append(self.convert_tokens_to_string(current_sub_text)) - # current_sub_text = [] - # sub_texts.append(token) - # else: current_sub_text.append(token) if current_sub_text: @@ -149,25 +171,33 @@ def EncodeAsTokens(self, text, process_fn=None): processed_text = process_fn(processed_text) tokens = self.text_tokenizer.tokenize(processed_text) return tokens - # return Tokenization(tokens, processed_text, text, asIds=False) def IdToToken(self, Id, type_token=False): """convert Id to sentencpiece token""" + if Id in self.command_id_map: + return self.command_id_map[Id].token return self.text_tokenizer.ids_to_tokens[Id] def TokenToId(self, token, type_token=False): """convert sentencpiece token to Id""" + if isinstance(token, (CommandToken)): + return token.Id token = token.lower() try: return self.text_tokenizer.vocab[token] except KeyError: - return self.text_tokenizer.vocab[token.strip()] + try: + return self.text_tokenizer.vocab[token.upper()] + except KeyError: + return self.text_tokenizer.vocab[token.strip()] def DecodeIds(self, Ids): """converts ids to wordpiece tokens and joins them as a text string""" Tokens = [] for Id in Ids: - if Id in self.text_tokenizer.ids_to_tokens: + if Id in self.command_id_map: + Tokens.append(self.command_id_map[Id].token) + elif Id in self.text_tokenizer.ids_to_tokens: Tokens.append(self.text_tokenizer.ids_to_tokens[Id]) new_tokens = [] for token in Tokens: diff --git a/flagai/data/tokenizer/bert/wordpiece.py b/flagai/data/tokenizer/bert/wordpiece.py index bee0d3bb..eb636f7a 100644 --- a/flagai/data/tokenizer/bert/wordpiece.py +++ b/flagai/data/tokenizer/bert/wordpiece.py @@ -119,7 +119,6 @@ def __init__(self, self._token_mask = '[MASK]' for token in ['pad', 'cls', 'sep', 'unk', 'mask']: - _token_id = self.vocab[getattr(self, "_token_" + str(token))] setattr(self, "_token_" + str(token) + "_id", _token_id) diff --git a/flagai/data/tokenizer/opt/opt_en_tokenizer.py b/flagai/data/tokenizer/opt/opt_en_tokenizer.py index 1373f4f1..8501601a 100644 --- a/flagai/data/tokenizer/opt/opt_en_tokenizer.py +++ b/flagai/data/tokenizer/opt/opt_en_tokenizer.py @@ -16,17 +16,57 @@ # See the License for the specific language governing permissions and from transformers import GPT2Tokenizer +from ..tokenizer import CommandToken, Tokenizer import os """define some default command tokens for the tokenizer to use""" -class OPTTokenizer: +class OPTTokenizer(Tokenizer): def __init__(self, 
tokenizer_model_type="facebook/opt-125m", cache_dir=None): self.text_tokenizer = GPT2Tokenizer.from_pretrained( tokenizer_model_type, cache_dir=cache_dir) self.text_tokenizer.max_len = int(1e12) + # parse tokens and vocabs from tokenizer + self._tokens = list(self.text_tokenizer.get_vocab().keys()) + self._vocab = {k: v for k, v in self.text_tokenizer.get_vocab().items()} + self.num_tokens = len(self._tokens) + self._command_tokens = [ + CommandToken('pad', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + CommandToken('ENC', '[CLS]', self.get_specialid_from_text_tokenizer('cls')), + CommandToken('MASK', '[MASK]', + self.get_specialid_from_text_tokenizer('mask')), + CommandToken('unk', '[UNK]', self.get_specialid_from_text_tokenizer('unk')), + CommandToken('sep', '[SEP]', self.get_specialid_from_text_tokenizer('sep')), + CommandToken('eos', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + ] + self._command_tokens.extend([ + CommandToken('sop', '<|startofpiece|>', self.num_tokens), + CommandToken('eop', '<|endofpiece|>', self.num_tokens + 1) + ]) + self.num_tokens += 2 + + self.command_name_map = {tok.name: tok for tok in self._command_tokens} + self.command_token_map = { + tok.token: tok + for tok in self._command_tokens + } + self.command_id_map = {tok.Id: tok for tok in self._command_tokens} + + def get_specialid_from_text_tokenizer(self, token): + if token in ["eos", "sep"]: + return self._vocab.get('') + elif token == "cls": + return self._vocab.get('') + elif token == "unk": + return self._vocab.get('') + elif token == "pad": + return self._vocab.get('') + elif token == "mask": + return self._vocab.get('') + else: + raise NameError("token not exists") def encode_plus(self, text, max_length=512): return self.text_tokenizer.encode_plus(text, truncation=True, max_length=max_length) @@ -38,5 +78,51 @@ def get_vocab(self): return self.text_tokenizer.get_vocab() + def get_command(self, name): + """get command token corresponding to `name`""" + return self.command_name_map[name] + + + def EncodeAsTokens(self, text, process_fn=None): + """convert wordpiece token to Id""" + processed_text = text + if process_fn is not None: + processed_text = process_fn(processed_text) + tokens = self.text_tokenizer.tokenize(processed_text) + return tokens + + def IdToToken(self, Id, type_token=False): + """convert Id to sentencpiece token""" + if Id in self.command_id_map: + return self.command_id_map[Id].token + return self.text_tokenizer.ids_to_tokens[Id] + + def TokenToId(self, token, type_token=False): + """convert sentencpiece token to Id""" + if isinstance(token, (CommandToken)): + return token.Id + try: + return self._vocab[token] + except KeyError: + return self._vocab[token.strip()] + + def DecodeIds(self, Ids): + """converts ids to wordpiece tokens and joins them as a text string""" + Tokens = [] + for Id in Ids: + if Id in self.command_id_map: + Tokens.append(self.command_id_map[Id].token) + elif Id in self.text_tokenizer.ids_to_tokens: + Tokens.append(self.text_tokenizer.ids_to_tokens[Id]) + new_tokens = [] + for token in Tokens: + if token.startswith('##') and len(new_tokens) > 0: + new_tokens[-1] += token[2:] + else: + new_tokens.append(token) + return ' '.join(new_tokens) + def DecodeTokens(self, Tokens): + """converts wordpiece tokens to a text string""" + return ' '.join(Tokens) diff --git a/flagai/data/tokenizer/roberta/roberta_tokenizer.py b/flagai/data/tokenizer/roberta/roberta_tokenizer.py index c8254e20..75a344d7 100644 --- 
a/flagai/data/tokenizer/roberta/roberta_tokenizer.py +++ b/flagai/data/tokenizer/roberta/roberta_tokenizer.py @@ -29,3 +29,92 @@ def __init__(self, tokenizer_model_type="roberta-base", cache_dir=None): self.text_tokenizer = RobertaTokenizer.from_pretrained( tokenizer_model_type, cache_dir=cache_dir) self.text_tokenizer.max_len = int(1e12) + + # # parse tokens and vocabs from tokenizer + self._tokens = list(self.text_tokenizer.get_vocab().keys()) + self._vocab = {k: v for k, v in self.text_tokenizer.get_vocab().items()} + self.num_tokens = len(self._tokens) + + self._command_tokens = [ + CommandToken('pad', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + CommandToken('ENC', '[CLS]', self.get_specialid_from_text_tokenizer('cls')), + CommandToken('MASK', '[MASK]', + self.get_specialid_from_text_tokenizer('mask')), + CommandToken('unk', '[UNK]', self.get_specialid_from_text_tokenizer('unk')), + CommandToken('sep', '[SEP]', self.get_specialid_from_text_tokenizer('sep')), + CommandToken('eos', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + ] + self._command_tokens.extend([ + CommandToken('sop', '<|startofpiece|>', self.num_tokens), + CommandToken('eop', '<|endofpiece|>', self.num_tokens + 1) + ]) + self.num_tokens += 2 + + self.command_name_map = {tok.name: tok for tok in self._command_tokens} + self.command_token_map = { + tok.token: tok + for tok in self._command_tokens + } + self.command_id_map = {tok.Id: tok for tok in self._command_tokens} + + def get_specialid_from_text_tokenizer(self, token): + if token in ["eos", "sep"]: + return self._vocab.get('') + elif token == "cls": + return self._vocab.get('') + elif token == "unk": + return self._vocab.get('') + elif token == "pad": + return self._vocab.get('') + elif token == "mask": + return self._vocab.get('') + else: + raise NameError("token not exists") + + def get_command(self, name): + """get command token corresponding to `name`""" + return self.command_name_map[name] + + + def EncodeAsTokens(self, text, process_fn=None): + """convert wordpiece token to Id""" + processed_text = text + if process_fn is not None: + processed_text = process_fn(processed_text) + tokens = self.text_tokenizer.tokenize(processed_text) + return tokens + + def IdToToken(self, Id, type_token=False): + """convert Id to sentencpiece token""" + if Id in self.command_id_map: + return self.command_id_map[Id].token + return self.text_tokenizer.ids_to_tokens[Id] + + def TokenToId(self, token, type_token=False): + """convert sentencpiece token to Id""" + if isinstance(token, (CommandToken)): + return token.Id + try: + return self._vocab[token] + except KeyError: + return self._vocab[token.strip()] + + def DecodeIds(self, Ids): + """converts ids to wordpiece tokens and joins them as a text string""" + Tokens = [] + for Id in Ids: + if Id in self.command_id_map: + Tokens.append(self.command_id_map[Id].token) + elif Id in self.text_tokenizer.ids_to_tokens: + Tokens.append(self.text_tokenizer.ids_to_tokens[Id]) + new_tokens = [] + for token in Tokens: + if token.startswith('##') and len(new_tokens) > 0: + new_tokens[-1] += token[2:] + else: + new_tokens.append(token) + return ' '.join(new_tokens) + + def DecodeTokens(self, Tokens): + """converts wordpiece tokens to a text string""" + return ' '.join(Tokens) diff --git a/flagai/data/tokenizer/t5/t5_tokenizer.py b/flagai/data/tokenizer/t5/t5_tokenizer.py index af29fe72..f19d1089 100644 --- a/flagai/data/tokenizer/t5/t5_tokenizer.py +++ b/flagai/data/tokenizer/t5/t5_tokenizer.py @@ -18,7 +18,7 @@ 
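Editorial aside, not part of the patch series: the same `CommandToken` scaffolding is added to the BERT, OPT, RoBERTa and, below, T5 tokenizers, all exposing `get_command`, `EncodeAsTokens`, `TokenToId` and `DecodeIds`. A minimal usage sketch of that shared interface, using the `BertWordPieceTokenizer` shown earlier; the no-argument constructor and its default vocabulary are assumptions here, not guarantees from the patch:

```python
# Sketch only: exercises the command-token API added in this patch series.
from flagai.data.tokenizer import BertWordPieceTokenizer  # import path as used in tests/bak_test_superglue.py

tokenizer = BertWordPieceTokenizer()                 # default vocab assumed
eos_id = tokenizer.get_command('eos').Id             # special ids come from CommandToken objects
tokens = tokenizer.EncodeAsTokens("the trophy doesn't fit in the suitcase")
ids = [tokenizer.TokenToId(t) for t in tokens]
text = tokenizer.DecodeIds(ids + [tokenizer.get_command('eop').Id])
print(eos_id, text)
```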
"""Utilities for using and training tokenizers (char, wordpiece, sentencepiece)""" from transformers import T5Tokenizer -from ..tokenizer import Tokenizer +from ..tokenizer import Tokenizer, CommandToken import jieba import unicodedata @@ -33,15 +33,51 @@ def __init__(self, tokenizer_model_type="t5-base", cache_dir=None): self.text_tokenizer = T5Tokenizer.from_pretrained(tokenizer_model_type, cache_dir=cache_dir) - # self.text_encoder = json.load(open(self.text_tokenizer.vocab_file,encoding='utf-8')) - # self.text_decoder = {v:k for k,v in self.text_encoder.items()} - # self.byte_text_encoder = bytes_to_unicode() - # self.text_byte_decoder = {v: k for k, v in self.byte_text_encoder.items()} - # disable max len warnings by increasing max len - self.text_tokenizer.max_len = int(1e12) - # self._tokens = list(self.text_tokenizer.vocab.keys()) - # self._vocab = {k: v for k, v in self.text_tokenizer.vocab.items()} + self._tokens = list(self.text_tokenizer.get_vocab().keys()) + self._vocab = {k: v for k, v in self.text_tokenizer.get_vocab().items()} + self.num_tokens = len(self._tokens) + + self._command_tokens = [ + CommandToken('unk', '[UNK]', self.get_specialid_from_text_tokenizer('unk')), + CommandToken('eos', '[PAD]', self.get_specialid_from_text_tokenizer('pad')), + CommandToken('sep', '[SEP]', self.num_tokens), + + CommandToken('pad', '[PAD]', self.num_tokens + 1), + CommandToken('ENC', '[CLS]', self.num_tokens + 2), + CommandToken('MASK', '[MASK]', + self.num_tokens + 3), + ] + self._command_tokens.extend([ + CommandToken('sop', '<|startofpiece|>', self.num_tokens + 4), + CommandToken('eop', '<|endofpiece|>', self.num_tokens + 5) + ]) + self.num_tokens += 5 + + self.command_name_map = {tok.name: tok for tok in self._command_tokens} + self.command_token_map = { + tok.token: tok + for tok in self._command_tokens + } + self.command_id_map = {tok.Id: tok for tok in self._command_tokens} + + def get_specialid_from_text_tokenizer(self, token): + if token in ["eos", "sep"]: + return self._vocab.get('') + elif token == "cls": + return self._vocab.get('<|endoftext|>') + elif token == "unk": + return self._vocab.get('') + elif token == "pad": + return self._vocab.get('') + elif token == "mask": + return self._vocab.get('') + else: + raise NameError("token not exists") + + def get_command(self, name): + """get command token corresponding to `name`""" + return self.command_name_map[name] def _encode(self, text): """text string to ids""" @@ -49,6 +85,46 @@ def _encode(self, text): ids = self.text_tokenizer.convert_tokens_to_ids(tokens) return ids + def EncodeAsTokens(self, text, process_fn=None): + """convert wordpiece token to Id""" + processed_text = text + if process_fn is not None: + processed_text = process_fn(processed_text) + tokens = self.text_tokenizer.tokenize(processed_text) + return tokens + + def IdToToken(self, Id, type_token=False): + """convert Id to sentencpiece token""" + if Id in self.command_id_map: + return self.command_id_map[Id].token + return self.text_tokenizer.ids_to_tokens[Id] + + def TokenToId(self, token, type_token=False): + """convert sentencpiece token to Id""" + if isinstance(token, (CommandToken)): + return token.Id + return self.text_tokenizer._convert_token_to_id(token.strip()) + + def DecodeIds(self, Ids): + """converts ids to wordpiece tokens and joins them as a text string""" + Tokens = [] + for Id in Ids: + if Id in self.command_id_map: + Tokens.append(self.command_id_map[Id].token) + elif Id in self.text_tokenizer.ids_to_tokens: + 
Tokens.append(self.text_tokenizer.ids_to_tokens[Id]) + new_tokens = [] + for token in Tokens: + if token.startswith('##') and len(new_tokens) > 0: + new_tokens[-1] += token[2:] + else: + new_tokens.append(token) + return ' '.join(new_tokens) + + def DecodeTokens(self, Tokens): + """converts wordpiece tokens to a text string""" + return ' '.join(Tokens) + class T5KGBPETokenizer(Tokenizer): @@ -63,16 +139,8 @@ def __init__(self, tokenizer_model_type="t5-base", cache_dir=None): self.text_tokenizer = T5Tokenizer.from_pretrained(tokenizer_model_type, cache_dir=cache_dir) - # self.text_encoder = json.load(open(self.text_tokenizer.vocab_file,encoding='utf-8')) - # self.text_decoder = {v:k for k,v in self.text_encoder.items()} - # self.byte_text_encoder = bytes_to_unicode() - # self.text_byte_decoder = {v: k for k, v in self.byte_text_encoder.items()} - # disable max len warnings by increasing max len self.text_tokenizer.max_len = int(1e12) - # self._tokens = list(self.text_tokenizer.vocab.keys()) - # self._vocab = {k: v for k, v in self.text_tokenizer.vocab.items()} - def _encode(self, text): """text string to ids""" tokens = self.text_tokenizer._tokenize(text) diff --git a/tests/bak_test_superglue.py b/tests/bak_test_superglue.py index 70c5bbf9..306abb38 100644 --- a/tests/bak_test_superglue.py +++ b/tests/bak_test_superglue.py @@ -4,7 +4,7 @@ import torch from flagai.trainer import Trainer from flagai.model.glm_model import GLMForSingleTokenCloze, GLMForMultiTokenCloze, GLMForSequenceClassification -from flagai.data.tokenizer import GLMLargeEnWordPieceTokenizer, GLMLargeChTokenizer +from flagai.data.tokenizer import GLMLargeEnWordPieceTokenizer, GLMLargeChTokenizer, BertWordPieceTokenizer, T5BPETokenizer, ROBERTATokenizer, OPTTokenizer, CPMTokenizer from flagai.data.dataset import SuperGlueDataset from flagai.test_utils import CollateArguments from flagai.data.dataset.superglue.control import DEFAULT_METRICS, MULTI_TOKEN_TASKS, CH_TASKS @@ -15,9 +15,12 @@ class TrainerTestCase(unittest.TestCase): def test_init_trainer_pytorch(self): + # for task_name in [ + # 'boolq', 'cb', 'copa', 'multirc', 'rte', 'wic', 'wsc', 'afqmc', + # 'tnews', 'qqp', 'cola', 'mnli', 'qnli' + # ]: for task_name in [ - 'boolq', 'cb', 'copa', 'multirc', 'rte', 'wic', 'wsc', 'afqmc', - 'tnews', 'qqp', 'cola', 'mnli', 'qnli' + 'boolq' ]: trainer = Trainer(env_type='pytorch', epochs=1, @@ -39,7 +42,12 @@ def test_init_trainer_pytorch(self): tokenizer = GLMLargeChTokenizer() else: model_name = 'GLM-large-en' - tokenizer = GLMLargeEnWordPieceTokenizer() + # tokenizer = GLMLargeEnWordPieceTokenizer() + # tokenizer = BertWordPieceTokenizer() + tokenizer = T5BPETokenizer() + # tokenizer = ROBERTATokenizer() + # tokenizer = OPTTokenizer() + # tokenizer = CPMTokenizer() if cl_args.cloze_eval: @@ -57,10 +65,24 @@ def test_init_trainer_pytorch(self): data_dir='./datasets/', dataset_type='train', tokenizer=tokenizer) - train_dataset.example_list = train_dataset.example_list[:1] + # print(train_dataset[0]) collate_fn = ConstructSuperglueStrategy(cl_args, tokenizer, task_name=task_name) + # import torch + # loader = torch.utils.data.DataLoader(train_dataset, + # batch_size=1, + # shuffle=False, + # num_workers=1, + # drop_last=False, + # pin_memory=False, + # collate_fn=collate_fn) + # for data_iterator in loader: + # for key, value in data_iterator.items(): + # print(key, value) + # break + train_dataset.example_list = train_dataset.example_list[:1] + valid_dataset = SuperGlueDataset(task_name=task_name, data_dir='./datasets/', From 
3a0c8cb4127a0bfd86ddd965ab4ece40b128ee6c Mon Sep 17 00:00:00 2001 From: zhaohu xing <32668889+920232796@users.noreply.github.com> Date: Wed, 6 Jul 2022 14:19:29 +0800 Subject: [PATCH 03/21] Opt 66b (#19) * autoloader for opt * opt-66b inference * Update train.py * Load data from example dir * add readme of multi GPU inference Co-authored-by: Zac Liu --- examples/glm_title_generation/train.py | 12 +- examples/opt/README.md | 98 +++++++++++++- examples/opt/generate_opt_66b.py | 22 ++++ examples/opt/opt_30b_en_mutigpu.py | 3 - examples/opt/opt_66b_en_mutigpu.py | 108 ++++++++++++++++ flagai/auto_model/auto_loader.py | 8 ++ flagai/model/base_model.py | 2 + flagai/model/blocks/gpt2_block.py | 2 + flagai/model/gpt2_model.py | 133 ++++++++++++------- flagai/model/layers/attentions.py | 50 ++++---- flagai/model/opt_model.py | 169 +------------------------ flagai/model/predictor/gpt.py | 55 ++++++++ flagai/model/predictor/predictor.py | 4 +- flagai/mp_tools.py | 12 +- 14 files changed, 430 insertions(+), 248 deletions(-) create mode 100644 examples/opt/generate_opt_66b.py create mode 100644 examples/opt/opt_66b_en_mutigpu.py create mode 100644 flagai/model/predictor/gpt.py diff --git a/examples/glm_title_generation/train.py b/examples/glm_title_generation/train.py index f7dd2654..e06d2c0b 100644 --- a/examples/glm_title_generation/train.py +++ b/examples/glm_title_generation/train.py @@ -27,12 +27,16 @@ num_checkpoints=1, ) -cur_dir = os.path.dirname(os.path.abspath(__file__)) -src_dir = cur_dir + '/data/train.src' -tgt_dir = cur_dir + '/data/train.tgt' +# cur_dir = os.path.dirname(os.path.abspath(__file__)) +# src_dir = cur_dir + '/data/train.src' +# tgt_dir = cur_dir + '/data/train.tgt' + +src_dir = "./data/train.src" +tgt_dir = "./data/train.tgt" + maxlen = 256 -auto_loader = AutoLoader("seq2seq", +auto_loader = AutoLoader("lm", model_name="GLM-large-ch", model_dir="./state_dict/") model = auto_loader.get_model() diff --git a/examples/opt/README.md b/examples/opt/README.md index 4ad5aa4f..ee727932 100644 --- a/examples/opt/README.md +++ b/examples/opt/README.md @@ -52,4 +52,100 @@ out = predictor.predict_generate_randomsample(text, repetition_penalty=3.0) print(f"input is {text} \n out is {out}") -``` \ No newline at end of file +``` + +# Multi-GPU inference +## OPT-30b + +To inference by multi-GPU and model parallel, we use torch-DDP and Megatron-LM library. +### Basic step +1. Set up the parameters of model parallel, such as ```model_parallel_size``` and ```world_size``` +2. Initialize torch-DDP +3. Initialize Megatron-LM, model parallel +4. Set up random seed +5. Initialize the model and tokenizer +6. 
Prediction +### code +```python +import torch +import os +import argparse +from flagai import mpu +from flagai.auto_model.auto_loader import AutoLoader +import random +import numpy as np +from flagai.model.predictor.predictor import Predictor + +# run script : python -m torch.distributed.launch --nproc_per_node=4 --nnodes=1 opt_30b_en_mutigpu.py +os.environ["ENV_TYPE"] = "deepspeed+mpu" +model_parallel_size = 4 +world_size = 4 + +os.environ["MODEL_PARALLEL_SIZE"] = str(model_parallel_size) +os.environ["WORLD_SIZE"] = str(world_size) + +def set_random_seed(seed): + """Set random seed for reproducability.""" + if seed is not None and seed > 0: + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + mpu.model_parallel_cuda_manual_seed(seed) + +parser = argparse.ArgumentParser() +parser.add_argument('--local_rank', + type=int, + default=0, + help="local_rank") + +ds_args = parser.parse_args() +local_rank = ds_args.local_rank + +master_addr = os.environ.get('MASTER_ADDR', '127.0.0.1') +master_port = os.environ.get('MASTER_PORT', '17501') + +device = torch.device("cuda", local_rank) + +def initialize_distributed(): + """Initialize torch.distributed.""" + torch.backends.cudnn.enabled = False + # Manually set the device ids. + torch.cuda.set_device(device) + # Call the init process + init_method = 'tcp://' + + init_method += master_addr + ':' + master_port + torch.distributed.init_process_group( + backend='nccl', # gloo + world_size=world_size, + rank=local_rank, + init_method=init_method) + mpu.initialize_model_parallel(model_parallel_size) + +initialize_distributed() + +set_random_seed(123) + +print(f"building model...") +loader = AutoLoader("lm", model_name="opt-30b-en") +model = loader.get_model() +tokenizer = loader.get_tokenizer() +model.half() + +model.parallel_output = False +model.eval() +model.to(device) + +torch.distributed.barrier(group=mpu.get_model_parallel_group()) + +text = """I think The Old Man and the Sea is a very good book, what do you think? I think """ + +predictor = Predictor(model, tokenizer) +out = predictor.predict_generate_randomsample(text) +if mpu.get_model_parallel_rank() == 0: + print(f"pred is {out}") +``` +### Run script is +```commandline +python -m torch.distributed.launch --nproc_per_node=4 --nnodes=1 opt_30b_en_mutigpu.py +``` diff --git a/examples/opt/generate_opt_66b.py b/examples/opt/generate_opt_66b.py new file mode 100644 index 00000000..bbee7987 --- /dev/null +++ b/examples/opt/generate_opt_66b.py @@ -0,0 +1,22 @@ +from flagai.model.predictor.predictor import Predictor +from flagai.auto_model.auto_loader import AutoLoader +import torch + +loader = AutoLoader(task_name="lm", + model_name="opt-66b-en") + +model = loader.get_model() +tokenizer = loader.get_tokenizer() +model.eval() + +text = """I think The Old Man and the Sea is a very good book, what do you think? 
Thank you for your question, I think """ + +predictor = Predictor(model, tokenizer) +out = predictor.predict_generate_randomsample(text, + input_max_length=100, + out_max_length=300, + top_k=50, + top_p=0.9, + repetition_penalty=3.0) + +print(f"input is {text} \n out is {out}") \ No newline at end of file diff --git a/examples/opt/opt_30b_en_mutigpu.py b/examples/opt/opt_30b_en_mutigpu.py index ac2a4a1f..623e769f 100644 --- a/examples/opt/opt_30b_en_mutigpu.py +++ b/examples/opt/opt_30b_en_mutigpu.py @@ -10,8 +10,6 @@ import glob import time -# run script : python -m torch.distributed.launch --nproc_per_node=4 --nnodes=1 opt_30b_en_mutigpu.py - os.environ["ENV_TYPE"] = "deepspeed+mpu" model_parallel_size = 4 world_size = 4 @@ -61,7 +59,6 @@ def initialize_distributed(): set_random_seed(123) - print(f"building model...") loader = AutoLoader("lm", model_name="opt-30b-en") model = loader.get_model() diff --git a/examples/opt/opt_66b_en_mutigpu.py b/examples/opt/opt_66b_en_mutigpu.py new file mode 100644 index 00000000..76616980 --- /dev/null +++ b/examples/opt/opt_66b_en_mutigpu.py @@ -0,0 +1,108 @@ +# os.environ["CUDA_VISIBLE_DEVICES"] = "0,2" +import torch +import os +import time +os.environ["ENV_TYPE"] = "deepspeed+mpu" +os.environ["MODEL_PARALLEL_SIZE"] = '8' +os.environ["WORLD_SIZE"] = '8' +import argparse +from flagai import mpu +import random +import numpy as np +from flagai.model.predictor.predictor import Predictor +from flagai.model.opt_model import OPTModel +from flagai.data.tokenizer import OPTTokenizer + +def get_current_rank(): + with open('current_rank','r',encoding='utf8') as infile: + line = infile.readline().strip() + return int(line) +def set_current_rank(rank): + with open('current_rank','w',encoding='utf8') as outfile: + outfile.write(str(rank)) + +def get_current_pool(): + with open('current_pool','r',encoding='utf8') as infile: + line = infile.readline().strip() + return int(line) + +def set_current_pool(rank): + with open('current_pool','w',encoding='utf8') as outfile: + outfile.write(str(rank)) + +# run script : python -m torch.distributed.launch --nproc_per_node=2 --nnodes=1 opt_66b_en_mutigpu.py +parser = argparse.ArgumentParser() +parser.add_argument('--local_rank', + type=int, + default=0, + help="local_rank") + +def set_random_seed(seed): + """Set random seed for reproducability.""" + if seed is not None and seed > 0: + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + mpu.model_parallel_cuda_manual_seed(seed) + +ds_args = parser.parse_args() +local_rank = ds_args.local_rank + +master_addr = os.environ.get('MASTER_ADDR', '127.0.0.1') +master_port = os.environ.get('MASTER_PORT', '17501') + +device = torch.device("cuda", local_rank) +model_parallel_size = 8 +world_size = 8 + +def initialize_distributed(): + """Initialize torch.distributed.""" + torch.backends.cudnn.enabled = False + # Manually set the device ids. 
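Editorial aside, not part of the patch: the `current_rank` / `current_pool` files written by the helpers defined earlier in this script appear to act as a crude file-based semaphore, so the eight ranks load their 66B checkpoint shards a few at a time rather than all at once. Distilled into one function (a sketch; the getter/setter arguments stand in for the helpers defined above):

```python
import time

def wait_for_loading_slot(local_rank, get_current_rank, get_current_pool,
                          set_current_rank, set_current_pool):
    """Block until it is this rank's turn and a loading slot is free."""
    while get_current_rank() != local_rank:   # ranks take turns in order
        time.sleep(10)
    while get_current_pool() == 0:            # wait until a slot is free
        time.sleep(10)
    set_current_pool(get_current_pool() - 1)  # claim a slot
    set_current_rank(local_rank + 1)          # let the next rank start waiting
    # ...load the checkpoint shard, then release the slot with
    # set_current_pool(get_current_pool() + 1), as the script does below.
```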
+ torch.cuda.set_device(device) + # Call the init process + init_method = 'tcp://' + + init_method += master_addr + ':' + master_port + torch.distributed.init_process_group( + backend='nccl', # gloo + world_size=world_size, + rank=local_rank, + init_method=init_method) + mpu.initialize_model_parallel(model_parallel_size) + +initialize_distributed() + +set_current_pool(4) +set_current_rank(0) +set_random_seed(123) +torch.distributed.barrier(group=mpu.get_model_parallel_group()) +tokenizer = OPTTokenizer() + +while get_current_rank() != local_rank: + time.sleep(10) +while get_current_pool() == 0: + time.sleep(10) +set_current_pool(get_current_pool()-1) +print("loading rank {}".format(local_rank)) +set_current_rank(local_rank + 1) + +model = OPTModel.init_from_json('/mnt/models_xingzhaohu/opt-66b-en/config.json') +checkpoint_path = '/mnt/models_xingzhaohu/opt-66b-en/pytorch_model_{:02d}.bin'.format(local_rank) +model.half() +model.eval() +model.to(device) +model.load_weights(checkpoint_path) + +print("loading rank {} finished".format(local_rank)) +set_current_pool(get_current_pool()+1) +print('current rank setting is {}'.format(get_current_pool())) + +torch.distributed.barrier(group=mpu.get_model_parallel_group()) +text = """I think The Old Man and the Sea is a very good book, what do you think? I think """ + +predictor = Predictor(model, tokenizer) +out = predictor.predict_generate_randomsample(text) +if mpu.get_model_parallel_rank() == 0: + print(f"pred is {out}") + diff --git a/flagai/auto_model/auto_loader.py b/flagai/auto_model/auto_loader.py index bcd59b62..2c79a8fd 100644 --- a/flagai/auto_model/auto_loader.py +++ b/flagai/auto_model/auto_loader.py @@ -72,6 +72,8 @@ def __getattr__(self, name): "opt-6.7b-en": ["flagai.model.opt_model","OPTModel", "opt"], "opt-13b-en": ["flagai.model.opt_model","OPTModel", "opt"], "opt-30b-en": ["flagai.model.opt_model","OPTModel", "opt"], + "opt-66b-en": ["flagai.model.opt_model","OPTModel", "opt"], + } TOKENIZER_DICT = { @@ -96,6 +98,8 @@ def __getattr__(self, name): "opt-6.7b-en": ["flagai.data.tokenizer.opt.opt_en_tokenizer","OPTTokenizer"], "opt-13b-en": ["flagai.data.tokenizer.opt.opt_en_tokenizer","OPTTokenizer"], "opt-30b-en": ["flagai.data.tokenizer.opt.opt_en_tokenizer","OPTTokenizer"], + "opt-66b-en": ["flagai.data.tokenizer.opt.opt_en_tokenizer","OPTTokenizer"], + } @@ -106,6 +110,7 @@ def __init__(self, model_name: str = "RoBERTa-base-ch", model_dir: str = "./checkpoints/", only_download_config: bool = False, + device="cpu", **kwargs): """ Args: @@ -169,6 +174,7 @@ def __init__(self, download_path=model_dir, model_name=model_name_, only_download_config=only_download_config, + device=device, **kwargs) model_id = _get_model_id(model_name) @@ -178,6 +184,8 @@ def __init__(self, vocab_file = os.path.join(download_path,'cog-pretrained.model') if not os.path.exists(vocab_file): vocab_file = _get_vocab_path(download_path, "cog-pretrain.model", model_id) + elif model_name == "glm-large-en": + vocab_file = "GLM-large-en" elif model_name == "cpm-large-ch": # two files to load vocab_file_1 = os.path.join(download_path, "vocab.json") diff --git a/flagai/model/base_model.py b/flagai/model/base_model.py index 2005fc43..5480b73b 100644 --- a/flagai/model/base_model.py +++ b/flagai/model/base_model.py @@ -45,6 +45,7 @@ def from_pretrain(cls, download_path='./checkpoints/', model_name='RoBERTa-base-ch', only_download_config=False, + device="cpu", **kwargs): model_id = None try: @@ -87,6 +88,7 @@ def from_pretrain(cls, model_id) if 
os.path.exists(config_path): model = cls.init_from_json(config_path, **kwargs) + model.to(device) if os.getenv('ENV_TYPE') != 'deepspeed+mpu': if os.path.exists(checkpoint_path): model.load_weights(checkpoint_path) diff --git a/flagai/model/blocks/gpt2_block.py b/flagai/model/blocks/gpt2_block.py index 27737b89..925419de 100644 --- a/flagai/model/blocks/gpt2_block.py +++ b/flagai/model/blocks/gpt2_block.py @@ -21,6 +21,7 @@ def __init__(self, n_ctx, config, scale=False): def forward( self, hidden_states, + layer_past=None, attention_mask=None, head_mask=None, use_cache=False, @@ -34,6 +35,7 @@ def forward( attn_outputs = self.attn( hidden_states, + layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, diff --git a/flagai/model/gpt2_model.py b/flagai/model/gpt2_model.py index 427c4be6..ec00a943 100644 --- a/flagai/model/gpt2_model.py +++ b/flagai/model/gpt2_model.py @@ -100,6 +100,8 @@ def __init__(self, config): self.wte = nn.Embedding(config.vocab_size, config.n_embd) self.wpe = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) + self.project_in = None + self.project_out = None self.h = nn.ModuleList([ GPT2Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer) @@ -114,12 +116,27 @@ def get_input_embeddings(self): def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings + def get_position_embeddings(self, **kwargs): + input_ids = kwargs["input_ids"] + input_shape = input_ids.size() + position_ids = kwargs.get("position_ids", None) + past_length = kwargs["past_length"] + if position_ids is None: + device = input_ids.device + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + + position_embeds = self.wpe(position_ids) + return position_embeds + + def forward( self, input_ids, attention_mask=None, + past_key_values=None, position_ids=None, - use_cache=None, + use_cache=False, output_attentions=None, output_hidden_states=None, ): @@ -131,13 +148,22 @@ def forward( if position_ids is not None: position_ids = position_ids.view(-1, input_shape[-1]) - if position_ids is None: - device = input_ids.device - position_ids = torch.arange(0, - input_shape[-1], - dtype=torch.long, - device=device) - position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + device = input_ids.device + + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + full_ids = input_ids + else: + past_length = past_key_values[0][0].size(-2) + full_ids = torch.ones((input_ids.shape[0], past_length + 1), dtype=torch.long, device=device) + + padding_mask = (full_ids > 0).float() + + position_embeds = self.get_position_embeddings(input_ids=input_ids, past_length=past_length, + position_ids=position_ids, padding_mask=padding_mask, + ) # Attention mask. 
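Editorial sketch, not part of the patch: further down, in `GPT2Model.forward`, the padding mask built from `(input_ids > 0)` and the lower-triangular mask from `_make_causal_mask` are multiplied together, and the product is converted right here into the additive bias `(1.0 - attention_mask) * -10000.0`. A standalone version of that combination, assuming pad id 0:

```python
import torch

def additive_attention_bias(input_ids: torch.Tensor) -> torch.Tensor:
    bsz, seq_len = input_ids.shape
    # causal part: query position i may attend to key positions <= i
    causal = torch.tril(torch.ones(seq_len, seq_len)).expand(bsz, 1, seq_len, seq_len)
    # padding part: block key positions that are pad (id 0)
    padding = (input_ids > 0).float()[:, None, None, :]
    mask = causal * padding                 # 1 = may attend, 0 = blocked
    return (1.0 - mask) * -10000.0          # 0 where allowed, -10000 where blocked

bias = additive_attention_bias(torch.tensor([[11, 12, 13, 0, 0]]))
# bias[0, 0] is 0.0 at/below the diagonal for non-pad keys and -10000.0 elsewhere
```

The bias is simply added to the raw attention scores, so blocked positions contribute nothing after the softmax.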
if attention_mask is not None: @@ -145,17 +171,21 @@ def forward( attention_mask = (1.0 - attention_mask) * -10000.0 inputs_embeds = self.wte(input_ids) - position_embeds = self.wpe(position_ids) + + output_shape = input_shape + (inputs_embeds.size(-1), ) + + if self.project_in is not None: + inputs_embeds = self.project_in(inputs_embeds) + + # position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds hidden_states = self.drop(hidden_states) - output_shape = input_shape + (hidden_states.size(-1), ) - presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None - for i, block in enumerate(self.h): + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states, ) @@ -171,6 +201,7 @@ def custom_forward(*inputs): outputs = checkpoint( create_custom_forward(block), hidden_states, + None, attention_mask, None, use_cache, @@ -180,6 +211,7 @@ def custom_forward(*inputs): outputs = block( hidden_states, + layer_past=layer_past, attention_mask=attention_mask, head_mask=None, use_cache=use_cache, @@ -194,14 +226,19 @@ def custom_forward(*inputs): all_self_attentions = all_self_attentions + ( outputs[2 if use_cache else 1], ) - hidden_states = self.ln_f(hidden_states) + # hidden_states = self.ln_f(hidden_states) + if self.ln_f is not None: + hidden_states = self.ln_f(hidden_states) + + if self.project_out is not None: + hidden_states = self.project_out(hidden_states) hidden_states = hidden_states.view(*output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states, ) - return hidden_states + return hidden_states, presents class GPT2Model(BaseModel): @@ -245,6 +282,7 @@ def forward( **data, ): input_ids = data.get("input_ids", None) + past_key_values = data.get("past_key_values", None) attention_mask = data.get("attention_mask", None) position_ids = data.get("position_ids", None) labels = data.get("labels", None) @@ -252,11 +290,18 @@ def forward( output_attentions = data.get("output_attentions", None) output_hidden_states = data.get("output_hidden_states", None) + device = input_ids.device extend_mask = (input_ids > 0).float() if attention_mask is None: - attention_mask = self._make_causal_mask(input_ids) - extend_mask = extend_mask.unsqueeze(1).unsqueeze( - 1) * attention_mask + + if past_key_values is not None: + past_length = past_key_values[0][0].size(-2) + full_ids = torch.zeros((input_ids.shape[0], past_length + 1), dtype=torch.long, device=device) + extend_mask = self._make_causal_mask(full_ids) + else : + attention_mask = self._make_causal_mask(input_ids) + extend_mask = extend_mask.unsqueeze(1).unsqueeze( + 1) * attention_mask transformer_outputs = self.transformer( input_ids, @@ -265,16 +310,17 @@ def forward( use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, + past_key_values=past_key_values, ) - logits = transformer_outputs + + logits, past_key_values = transformer_outputs + if os.getenv("ENV_TYPE") == 'deepspeed+mpu': logits_parallel = copy_to_model_parallel_region(logits) else: logits_parallel = logits - # if self.output_predict: - # Parallel logits. 
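Editorial aside, not part of the patch: the projection just below reuses the input embedding table `wte.weight` as a tied output head, and under `deepspeed+mpu` it is bracketed by `copy_to_model_parallel_region` / `gather_from_model_parallel_region` so each rank multiplies only against its vocabulary shard. In isolation, the operation is just:

```python
import torch
import torch.nn.functional as F

wte = torch.nn.Embedding(50257, 768)    # input embedding table: (vocab_size, n_embd)
hidden = torch.randn(2, 7, 768)         # transformer output: (batch, seq, n_embd)
logits = F.linear(hidden, wte.weight)   # hidden @ wte.weight.T -> (batch, seq, vocab_size)
```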
logits_parallel = F.linear(logits_parallel, self.transformer.wte.weight) @@ -287,36 +333,27 @@ def forward( shift_logits.contiguous().float(), shift_labels).mean() else: loss = F.cross_entropy( - shift_logits.contiguous().float(), shift_labels.long()) - - if self.parallel_output: # Put in different GPUs - return { - 'logits': logits_parallel, - 'loss': loss, - 'hidden_states': None, - } - else: - return { - "logits": - gather_from_model_parallel_region(logits_parallel), - "loss": - loss, - "hidden_states": - None, - } + shift_logits.view(-1, shift_logits.shape[-1]).contiguous().float(), shift_labels.view(-1).contiguous().long()) + + return { + 'logits': logits_parallel, + 'loss': loss, + 'hidden_states': past_key_values, + } + else: - if self.parallel_output: # Put in different GPUs - return { - 'logits': logits_parallel, - 'hidden_states': None, - } + + if os.getenv("ENV_TYPE") == 'deepspeed+mpu': + logits = gather_from_model_parallel_region(logits_parallel) else: - return { - "logits": - gather_from_model_parallel_region(logits_parallel), - "hidden_states": - None, - } + logits = logits_parallel + return { + "logits": + logits, + "hidden_states": + past_key_values, + } + def load_weights(self, checkpoint_path): checkpoint = torch.load(checkpoint_path, diff --git a/flagai/model/layers/attentions.py b/flagai/model/layers/attentions.py index 0cfa5065..3fcf17cf 100644 --- a/flagai/model/layers/attentions.py +++ b/flagai/model/layers/attentions.py @@ -96,12 +96,11 @@ def _attn(self, v, attention_mask=None, head_mask=None, - output_attentions=False): - w = torch.matmul(q, k) + ): + w = torch.matmul(q, k.transpose(-1, -2)) if self.scale: w = w / (float(v.size(-1))**0.5) - nd, ns = w.size(-2), w.size(-1) # if not self.is_cross_attention: # if only "normal" attention layer implements causal mask @@ -119,17 +118,16 @@ def _attn(self, if head_mask is not None: w = w * head_mask w = w.to(v.dtype) # fp16 - outputs = (torch.matmul(w, v), ) - if output_attentions: - outputs += (w, ) - return outputs + outputs = torch.matmul(w, v) + + return outputs, w def merge_heads(self, x): x = x.permute(0, 2, 1, 3).contiguous() new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1), ) return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states - def split_heads(self, x, k=False): + def split_heads(self, x): if os.getenv('ENV_TYPE') == 'deepspeed+mpu': new_x_shape = x.size()[:-1] + ( self.num_attention_heads_per_partition, @@ -138,44 +136,52 @@ def split_heads(self, x, k=False): new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head) x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states - if k: - return x.permute(0, 2, 3, - 1) # (batch, head, head_features, seq_length) - else: - return x.permute(0, 2, 1, - 3) # (batch, head, seq_length, head_features) + # if k: + # return x.permute(0, 2, 3, + # 1) # (batch, head, head_features, seq_length) + # else: + return x.permute(0, 2, 1, + 3) # (batch, head, seq_length, head_features) def forward( self, hidden_states, + layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, ): - query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self.split_heads(query) - key = self.split_heads(key, k=True) + key = self.split_heads(key) value = self.split_heads(value) + if layer_past is not None: + past_key, past_value = layer_past + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + if use_cache is True: - present = (key.transpose(-2, -1), 
value - ) # transpose to have same shapes + present = (key, value + ) else: present = None attn_outputs = self._attn(query, key, value, attention_mask, head_mask, - output_attentions) + ) a = attn_outputs[0] - + if layer_past is not None: + a = a[:, :, -1:] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a) - - return (a, present) + attn_outputs[1:] # a, present, (attentions) + outputs = (a, present) + if output_attentions: + outputs += (attn_outputs[1]) + return outputs # a, present, (attentions) class T5Attention(nn.Module): diff --git a/flagai/model/opt_model.py b/flagai/model/opt_model.py index ef89695d..874cb2c2 100644 --- a/flagai/model/opt_model.py +++ b/flagai/model/opt_model.py @@ -24,74 +24,6 @@ from flagai.model.gpt2_model import GPT2Model, GPT2Stack, GPT2Config from torch.utils.checkpoint import checkpoint - -# class GPT2Config: -# -# def __init__( -# self, -# vocab_size=50257, -# n_positions=1024, -# n_ctx=1024, -# n_embd=768, -# n_layer=12, -# n_head=12, -# n_inner=None, -# activation_function="gelu_new", -# resid_pdrop=0.1, -# embd_pdrop=0.1, -# attn_pdrop=0.1, -# layer_norm_epsilon=1e-5, -# initializer_range=0.02, -# summary_type="cls_index", -# summary_use_proj=True, -# summary_activation=None, -# summary_proj_to_labels=True, -# summary_first_dropout=0.1, -# scale_attn_weights=True, -# gradient_checkpointing=False, -# use_cache=True, -# bos_token_id=50256, -# eos_token_id=50256, -# checkpoint_activations=False, -# hidden_size=768, -# ): -# self.checkpoint_activations = checkpoint_activations -# self.vocab_size = vocab_size -# # self.n_ctx = n_ctx -# self.n_positions = n_positions -# self.n_ctx = n_positions -# self.n_embd = n_embd -# self.hidden_size = hidden_size -# self.n_layer = n_layer -# self.n_head = n_head -# self.n_inner = n_inner -# self.activation_function = activation_function -# self.resid_pdrop = resid_pdrop -# self.embd_pdrop = embd_pdrop -# self.attn_pdrop = attn_pdrop -# self.layer_norm_epsilon = layer_norm_epsilon -# self.initializer_range = initializer_range -# self.summary_type = summary_type -# self.summary_use_proj = summary_use_proj -# self.summary_activation = summary_activation -# self.summary_first_dropout = summary_first_dropout -# self.summary_proj_to_labels = summary_proj_to_labels -# self.gradient_checkpointing = gradient_checkpointing -# self.scale_attn_weights = scale_attn_weights -# self.use_cache = use_cache -# -# self.bos_token_id = bos_token_id -# self.eos_token_id = eos_token_id - - -_CHECKPOINT_FOR_DOC = "facebook/opt-350m" -_CONFIG_FOR_DOC = "OPTConfig" -_TOKENIZER_FOR_DOC = "GPT2Tokenizer" - -# Base model docstring -_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] - - OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/opt-125m", "facebook/opt-350m", @@ -120,22 +52,11 @@ def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int # create positions depending on attention_mask positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1 - # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return super().forward(positions + self.offset) -def make_causal_mask(input_ids): - device = input_ids.device - bsz, tgt_len = input_ids.shape - mask = torch.full((tgt_len, tgt_len), 0.0).to(device) - mask_cond = torch.arange(mask.size(-1)).to(device) - mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), - 1.0) - - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) - class OPTStack(GPT2Stack): def 
__init__(self, config: GPT2Config): super(OPTStack, self).__init__(config) @@ -154,88 +75,12 @@ def __init__(self, config: GPT2Config): else: self.project_in = None - def forward( - self, - input_ids, - attention_mask=None, - position_ids=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - ): - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - batch_size = input_ids.shape[0] - - extend_mask = (input_ids > 0).float() - position_embeds = self.wpe(extend_mask, 0) - - # if attention_mask is None: - attention_mask = make_causal_mask(input_ids) - attention_mask = extend_mask.unsqueeze(1).unsqueeze( - 1) * attention_mask - attention_mask = (1.0 - attention_mask) * -10000.0 - - inputs_embeds = self.wte(input_ids) - if self.project_in is not None: - inputs_embeds = self.project_in(inputs_embeds) - hidden_states = inputs_embeds + position_embeds - - hidden_states = self.drop(hidden_states) - - # output_shape = input_shape + (hidden_states.size(-1), ) - - presents = () if use_cache else None - all_self_attentions = () if output_attentions else None - all_hidden_states = () if output_hidden_states else None - - for i, block in enumerate(self.h): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states, ) - if self.config.checkpoint_activations: - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - return custom_forward - - outputs = checkpoint( - create_custom_forward(block), - hidden_states, - attention_mask, - None, - use_cache, - output_attentions, - ) - else: - - outputs = block( - hidden_states, - attention_mask=attention_mask, - head_mask=None, - use_cache=use_cache, - output_attentions=output_attentions, - ) - - hidden_states = outputs[0] - if use_cache is True: - presents = presents + (outputs[1], ) - - if output_attentions: - all_self_attentions = all_self_attentions + ( - outputs[2 if use_cache else 1], ) - - if self.ln_f is not None: - hidden_states = self.ln_f(hidden_states) - - if self.project_out is not None: - hidden_states = self.project_out(hidden_states) - - # hidden_states = hidden_states.view(*output_shape) - # Add last hidden state - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states, ) - - return hidden_states + def get_position_embeddings(self, **kwargs): + pass + padding_mask = kwargs["padding_mask"] + past_length = kwargs["past_length"] + position_embeds = self.wpe(padding_mask, past_length) + return position_embeds def trans_opt_to_gpt_config(opt_config_json): trans_config_json = {} @@ -262,7 +107,6 @@ class OPTModel(GPT2Model): def __init__(self, config, **kwargs): config = trans_opt_to_gpt_config(config) super(OPTModel, self).__init__(config, **kwargs) - # self.config = config self.transformer = OPTStack(self.config) def load_weights(self, checkpoint_path): @@ -279,7 +123,6 @@ def load_weights(self, checkpoint_path): else : checkpoint_[k] = v - checkpoint = self.transpose_weight(checkpoint_) self.load_state_dict(checkpoint, strict=False) self.lm_head.weight.data = nn.Parameter(self.transformer.wte.weight.data) diff --git a/flagai/model/predictor/gpt.py b/flagai/model/predictor/gpt.py new file mode 100644 index 00000000..e99d11f4 --- /dev/null +++ b/flagai/model/predictor/gpt.py @@ -0,0 +1,55 @@ +from flagai.model.predictor.utils import RepetitionPenaltyLogitsProcessor, TemperatureLogitsProcessor, TopPLogitsProcessor, TopKLogitsProcessor, ListProcessor +import torch +import torch.nn.functional as F + + +def 
gpt_random_sample_use_cache(model, tokenizer, text, input_max_length, out_max_length, + top_k, top_p, repetition_penalty, temperature, device): + tokenizer_out = tokenizer.encode_plus(text, max_length=input_max_length) + token_ids = tokenizer_out["input_ids"] + token_end_id = tokenizer.token_end_id + if token_ids[-1] == token_end_id: + token_ids = token_ids[:-1] + + lp = [ + RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty), + TemperatureLogitsProcessor(temperature=temperature), + TopKLogitsProcessor(top_k=top_k), + TopPLogitsProcessor(top_p=top_p), + ] + list_processor = ListProcessor(lp) + + token_ids = torch.tensor(token_ids, device=device, + dtype=torch.long).view(1, -1) + output_ids = [] + sep_id = tokenizer.token_end_id + outputs = model(**{"input_ids": token_ids, "use_cache": True}) + scores = outputs["logits"] + past_key_values = outputs["hidden_states"] + + logit_score = torch.log_softmax(scores[:, -1], dim=-1) + logit_score[:, tokenizer.token_unk_id] = -float('Inf') + + filtered_logits = list_processor(token_ids, logit_score) + next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), + num_samples=1) + token_ids = torch.cat([token_ids, next_token.long()], dim=1) + + with torch.no_grad(): + for step in range(out_max_length - 1): + outputs = model(**{"input_ids": next_token, "use_cache": True, "past_key_values": past_key_values}) + scores = outputs["logits"] + past_key_values = outputs["hidden_states"] + + logit_score = torch.log_softmax(scores[:, -1], dim=-1) + logit_score[:, tokenizer.token_unk_id] = -float('Inf') + + filtered_logits = list_processor(token_ids, logit_score) + next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), + num_samples=1) + if sep_id == next_token.item(): + break + output_ids.append(next_token.item()) + token_ids = torch.cat((token_ids, next_token.long()), dim=1) + + return tokenizer.decode(output_ids) \ No newline at end of file diff --git a/flagai/model/predictor/predictor.py b/flagai/model/predictor/predictor.py index a7a414e3..fa426945 100644 --- a/flagai/model/predictor/predictor.py +++ b/flagai/model/predictor/predictor.py @@ -8,6 +8,8 @@ t5_random_sample, gpt_random_sample, \ t5_beamsearch, gpt_beamsearch, bert_random_sample, glm_beamsearch, glm_random_sample from typing import List, Union, Dict, Tuple, Any +from flagai.model.predictor.gpt import gpt_random_sample_use_cache + class Predictor: @@ -277,7 +279,7 @@ def predict_generate_randomsample(self, device) elif "gpt" in self.class_name.lower() or "opt" in self.class_name.lower(): - return gpt_random_sample(self.model, self.tokenizer, text, + return gpt_random_sample_use_cache(self.model, self.tokenizer, text, input_max_length, out_max_length, top_k, top_p, repetition_penalty, temperature, device) diff --git a/flagai/mp_tools.py b/flagai/mp_tools.py index 5f172541..de5394af 100644 --- a/flagai/mp_tools.py +++ b/flagai/mp_tools.py @@ -48,16 +48,17 @@ def check_pytorch_model_mp_size(checkpoint: str, target_mp: int): """ check the checkpoints contains the weights for mp_size = target_mp """ + assert target_mp > 1 assert os.path.isdir(checkpoint) filenames = os.listdir(checkpoint) filenames = [ filename for filename in filenames - if filename.startswith("pytorch_model") + if filename.startswith("pytorch_model_") ] - if 'pytorch_model.bin' in filenames and target_mp == 1: - return True - else: - filenames.remove('pytorch_model.bin') + # if 'pytorch_model.bin' in filenames and target_mp == 1: + # return True + # else: + # filenames.remove('pytorch_model.bin') print( 
"check the weight files in {}, the number of mp_size({}) {} num_of_files({})" .format(checkpoint, target_mp, @@ -233,7 +234,6 @@ def change_pytorch_model_mp_from_1_to_n_new(model_name_brief, checkpoint: str, t d_new[k] = None d_new['module'] = {} with torch.no_grad(): - if "module" in d: d = d["module"] From 4f8d715f08442df111653bcd95c73b3c9049659e Mon Sep 17 00:00:00 2001 From: Anhforth Date: Wed, 6 Jul 2022 15:56:45 +0800 Subject: [PATCH 04/21] updated release version Signed-off-by: Anhforth --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 55bed9f2..3cf785f4 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name="flagai", - version="v1.1.0", + version="v1.1.1", description="FlagAI aims to help researchers and developers to freely train and test large-scale models for NLP tasks.", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", From efc1310b6f193e69ae1f3a8262b10dcf8366fd7e Mon Sep 17 00:00:00 2001 From: Anhforth Date: Wed, 6 Jul 2022 16:51:18 +0800 Subject: [PATCH 05/21] fix tokenizer issue Signed-off-by: Anhforth --- .../tokenizer/roberta/roberta_tokenizer.py | 19 ++++++++++--------- flagai/data/tokenizer/t5/t5_tokenizer.py | 19 ++++++++++--------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/flagai/data/tokenizer/roberta/roberta_tokenizer.py b/flagai/data/tokenizer/roberta/roberta_tokenizer.py index 75a344d7..a525f2a6 100644 --- a/flagai/data/tokenizer/roberta/roberta_tokenizer.py +++ b/flagai/data/tokenizer/roberta/roberta_tokenizer.py @@ -105,15 +105,16 @@ def DecodeIds(self, Ids): for Id in Ids: if Id in self.command_id_map: Tokens.append(self.command_id_map[Id].token) - elif Id in self.text_tokenizer.ids_to_tokens: - Tokens.append(self.text_tokenizer.ids_to_tokens[Id]) - new_tokens = [] - for token in Tokens: - if token.startswith('##') and len(new_tokens) > 0: - new_tokens[-1] += token[2:] - else: - new_tokens.append(token) - return ' '.join(new_tokens) + elif Id < self.text_tokenizer.vocab_size: + Tokens.append(self.text_tokenizer._convert_id_to_token(Id)) + return self.text_tokenizer.convert_tokens_to_string(Tokens) + # new_tokens = [] + # for token in Tokens: + # if token.startswith('##') and len(new_tokens) > 0: + # new_tokens[-1] += token[2:] + # else: + # new_tokens.append(token) + # return ' '.join(new_tokens) def DecodeTokens(self, Tokens): """converts wordpiece tokens to a text string""" diff --git a/flagai/data/tokenizer/t5/t5_tokenizer.py b/flagai/data/tokenizer/t5/t5_tokenizer.py index f19d1089..8774b3af 100644 --- a/flagai/data/tokenizer/t5/t5_tokenizer.py +++ b/flagai/data/tokenizer/t5/t5_tokenizer.py @@ -111,15 +111,16 @@ def DecodeIds(self, Ids): for Id in Ids: if Id in self.command_id_map: Tokens.append(self.command_id_map[Id].token) - elif Id in self.text_tokenizer.ids_to_tokens: - Tokens.append(self.text_tokenizer.ids_to_tokens[Id]) - new_tokens = [] - for token in Tokens: - if token.startswith('##') and len(new_tokens) > 0: - new_tokens[-1] += token[2:] - else: - new_tokens.append(token) - return ' '.join(new_tokens) + elif Id < self.text_tokenizer.vocab_size: + Tokens.append(self.text_tokenizer._convert_id_to_token(Id)) + return self.text_tokenizer.convert_tokens_to_string(Tokens) + # new_tokens = [] + # for token in Tokens: + # if token.startswith('##') and len(new_tokens) > 0: + # new_tokens[-1] += token[2:] + # else: + # new_tokens.append(token) + # return ' '.join(new_tokens) def DecodeTokens(self, Tokens): 
"""converts wordpiece tokens to a text string""" From 9b818694b4d80b1aaa53936c97d2b683b3793dc9 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Fri, 8 Jul 2022 14:41:18 +0800 Subject: [PATCH 06/21] fix bug multi_gpu_training --- flagai/model/glm_model.py | 2 +- flagai/trainer.py | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/flagai/model/glm_model.py b/flagai/model/glm_model.py index fe08115a..3c4f9746 100644 --- a/flagai/model/glm_model.py +++ b/flagai/model/glm_model.py @@ -462,7 +462,7 @@ def forward(self, else: loss = F.cross_entropy( - logits_parallel.contiguous().float(), labels.long()) + logits_parallel.reshape(-1, logits_parallel.shape[-1]).contiguous().float(), labels.reshape(-1).long()) if self.parallel_output: # Put in different GPUs return { diff --git a/flagai/trainer.py b/flagai/trainer.py index 1b7d1afd..b9a6a1ba 100644 --- a/flagai/trainer.py +++ b/flagai/trainer.py @@ -309,12 +309,17 @@ def get_dataloader(self, dataset, collate_fn, shuffle=False): shuffle=shuffle) else: if self.env_type == 'deepspeed+mpu': - num_replicas = self.world_size // mpu.get_model_parallel_world_size( - ) - rank = self.rank // mpu.get_model_parallel_world_size() + # num_replicas = self.world_size // mpu.get_model_parallel_world_size( + # ) + # rank = self.rank // mpu.get_model_parallel_world_size() + # rank = mpu.get_model_parallel_rank() + rank = mpu.get_model_parallel_src_rank() + print("*"*80) + print("local rank",self.rank, "model rank", rank) + print("*"*80) sampler = torch.utils.data.distributed.DistributedSampler( dataset, - num_replicas=num_replicas, + # num_replicas=num_replicas, rank=rank, shuffle=shuffle) else: @@ -474,13 +479,12 @@ def train(self, for epoch in range(self.epochs): # log_dist('working on epoch {} ...'.format(epoch), [0]) # Set the data loader epoch to shuffle the index iterator. - if self.env_type == 'deepspeed+mpu': - if mpu.get_model_parallel_rank() == 0: - train_dataloader.sampler.set_epoch(epoch + self.world_size) - elif self.env_type != 'pytorch': + # if self.env_type == 'deepspeed+mpu': + # if mpu.get_model_parallel_rank() == 0: + # train_dataloader.sampler.set_epoch(epoch + self.world_size) + if self.env_type != 'pytorch': train_dataloader.sampler.set_epoch(epoch + self.world_size) - # For all the batches in the dataset. for iteration_, batch in enumerate(train_dataloader): # Train for one step. 
From 72ffd6a917a87755cbf241a76746accaa4dca518 Mon Sep 17 00:00:00 2001 From: Anhforth Date: Fri, 8 Jul 2022 15:21:55 +0800 Subject: [PATCH 07/21] changed the version Signed-off-by: Anhforth --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3cf785f4..158b41be 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name="flagai", - version="v1.1.1", + version="v1.1.2", description="FlagAI aims to help researchers and developers to freely train and test large-scale models for NLP tasks.", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", From e6f89a61b49dbe4bd13fc146c68758e5bc9060ae Mon Sep 17 00:00:00 2001 From: zhaohu xing <32668889+920232796@users.noreply.github.com> Date: Mon, 11 Jul 2022 13:37:05 +0800 Subject: [PATCH 08/21] fix_validation_bug (#24) --- examples/glm_poetry_generation/train.py | 15 ++++++--------- examples/glm_title_generation/train.py | 2 +- flagai/auto_model/auto_loader.py | 10 +++++++--- flagai/trainer.py | 16 ++++++++++------ 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/examples/glm_poetry_generation/train.py b/examples/glm_poetry_generation/train.py index d1d56df9..0a994833 100644 --- a/examples/glm_poetry_generation/train.py +++ b/examples/glm_poetry_generation/train.py @@ -10,8 +10,6 @@ cur_dir = os.path.dirname(os.path.abspath(__file__)) src_dir = cur_dir + '/data/src.txt' tgt_dir = cur_dir + '/data/tgt.txt' -model_dir = "./state_dict/" # ./state_dict/roberta/ # 模型位置 - def read_file(): src = [] @@ -35,14 +33,12 @@ def read_file(): return src, tgt -auto_loader = AutoLoader("seq2seq", - model_name="GLM-large-ch", - model_dir=model_dir) +auto_loader = AutoLoader("lm", + model_name="GLM-large-ch") model = auto_loader.get_model() tokenizer = auto_loader.get_tokenizer() -# Custom model and tokenizer: -# model = GLMForSeq2Seq.from_pretrain(download_path=model_dir,model_name='GLM-large-ch') -# tokenizer = GLMLargeChTokenizer() + + trainer = Trainer( env_type="pytorch", #pytorch or deepspeed experiment_name="glm_seq2seq", @@ -66,6 +62,7 @@ def read_file(): hostfile='./hostfile', deepspeed_config='./deepspeed.json', training_script=__file__, + model_parallel_size=8 ) @@ -123,7 +120,7 @@ def __call__(self, batch): loss_mask[i] = self.pad_loss_mask(loss_mask[i], max_length) return { 'input_ids': torch.LongTensor(input_ids), - 'target_ids': torch.LongTensor(target_ids), + 'labels': torch.LongTensor(target_ids), 'position_ids': torch.LongTensor(position_ids), 'attention_mask': torch.LongTensor(attention_mask), 'loss_mask': torch.LongTensor(loss_mask) diff --git a/examples/glm_title_generation/train.py b/examples/glm_title_generation/train.py index e06d2c0b..2dae9c5a 100644 --- a/examples/glm_title_generation/train.py +++ b/examples/glm_title_generation/train.py @@ -125,7 +125,7 @@ def __call__(self, batch): loss_mask[i] = self.pad_loss_mask(loss_mask[i], max_length) return { 'input_ids': torch.LongTensor(input_ids), - 'target_ids': torch.LongTensor(target_ids), + 'labels': torch.LongTensor(target_ids), 'position_ids': torch.LongTensor(position_ids), 'attention_mask': torch.LongTensor(attention_mask), 'loss_mask': torch.LongTensor(loss_mask) diff --git a/flagai/auto_model/auto_loader.py b/flagai/auto_model/auto_loader.py index 2c79a8fd..37d7ca90 100644 --- a/flagai/auto_model/auto_loader.py +++ b/flagai/auto_model/auto_loader.py @@ -73,7 +73,7 @@ def __getattr__(self, name): "opt-13b-en": ["flagai.model.opt_model","OPTModel", "opt"], 
"opt-30b-en": ["flagai.model.opt_model","OPTModel", "opt"], "opt-66b-en": ["flagai.model.opt_model","OPTModel", "opt"], - + "glm-10b-ch": ["flagai.model.glm_model", "GLMModel", "glm"], } TOKENIZER_DICT = { @@ -89,6 +89,10 @@ def __getattr__(self, name): "flagai.data.tokenizer.glm_large_en.glm_large_en_tokenizer", "GLMLargeEnWordPieceTokenizer" ], + "glm-10b-ch": [ + "flagai.data.tokenizer.glm_large_ch.glm_large_ch_tokenizer", + "GLMLargeChTokenizer" + ], "gpt2-base-ch": ["flagai.data.tokenizer.bert.bert_tokenizer", "BertTokenizer"], "cpm-large-ch": ["flagai.data.tokenizer.cpm_1.cpm1_tokenizer", "CPMTokenizer"], "opt-125m-en": ["flagai.data.tokenizer.opt.opt_en_tokenizer","OPTTokenizer"], @@ -180,11 +184,11 @@ def __init__(self, model_id = _get_model_id(model_name) print("*"*20, task_name, model_id, model_name) - if model_name == 'glm-large-ch': + if "glm" in model_name and "ch" in model_name: vocab_file = os.path.join(download_path,'cog-pretrained.model') if not os.path.exists(vocab_file): vocab_file = _get_vocab_path(download_path, "cog-pretrain.model", model_id) - elif model_name == "glm-large-en": + elif "glm" in model_name and "en" in model_name: vocab_file = "GLM-large-en" elif model_name == "cpm-large-ch": # two files to load diff --git a/flagai/trainer.py b/flagai/trainer.py index b9a6a1ba..911fe653 100644 --- a/flagai/trainer.py +++ b/flagai/trainer.py @@ -935,18 +935,22 @@ def evaluate(self, all_labels.append(labels) all_losses.append(lm_loss.view(1)) - all_logits = torch.cat(all_logits, dim=0) - all_labels = torch.cat(all_labels, dim=0) + if len(self.metric_methods) != 0: + all_logits = torch.cat(all_logits, dim=0) + all_labels = torch.cat(all_labels, dim=0) + all_losses = torch.cat(all_losses, dim=0) if self.env_type == 'pytorchDDP' or self.env_type == 'deepspeed': - all_logits = self._gather_all(all_logits) - all_labels = self._gather_all(all_labels) + if len(self.metric_methods) != 0: + all_logits = self._gather_all(all_logits) + all_labels = self._gather_all(all_labels) all_losses = self._gather_all(all_losses) elif self.env_type == 'deepspeed+mpu': - all_logits = self._gather_all_mpu(all_logits) - all_labels = self._gather_all_mpu(all_labels) + if len(self.metric_methods) != 0: + all_logits = self._gather_all_mpu(all_logits) + all_labels = self._gather_all_mpu(all_labels) all_losses = self._gather_all_mpu(all_losses) if all_losses.device != torch.device('cpu'): From 29ea8507a49fe301d94ef51e2e4e34b0eb16367c Mon Sep 17 00:00:00 2001 From: Anhforth Date: Mon, 11 Jul 2022 13:43:14 +0800 Subject: [PATCH 09/21] updated the version Signed-off-by: Anhforth --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 158b41be..f5ecb257 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name="flagai", - version="v1.1.2", + version="v1.1.3", description="FlagAI aims to help researchers and developers to freely train and test large-scale models for NLP tasks.", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", From 8d44329a514293c7396f45227770c8d2da825458 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Fri, 15 Jul 2022 17:57:37 +0800 Subject: [PATCH 10/21] add vit and examples --- examples/vit_cifar100/README.md | 163 ++++++ examples/vit_cifar100/deepspeed.json | 48 ++ examples/vit_cifar100/hostfile | 1 + examples/vit_cifar100/train_DDP.py | 86 +++ examples/vit_cifar100/train_deepspeed.py | 87 +++ examples/vit_cifar100/train_single_gpu.py | 85 +++ 
examples/vit_cifar100/validate.py | 76 +++ flagai/auto_model/auto_loader.py | 115 ++-- flagai/launch.py | 12 +- flagai/model/base_model.py | 30 +- flagai/model/vision/layers/__init__.py | 42 ++ flagai/model/vision/layers/activations.py | 145 +++++ flagai/model/vision/layers/activations_jit.py | 90 ++++ flagai/model/vision/layers/activations_me.py | 218 ++++++++ .../vision/layers/adaptive_avgmax_pool.py | 118 ++++ .../model/vision/layers/attention_pool2d.py | 131 +++++ flagai/model/vision/layers/blur_pool.py | 42 ++ flagai/model/vision/layers/bottleneck_attn.py | 157 ++++++ flagai/model/vision/layers/cbam.py | 112 ++++ flagai/model/vision/layers/classifier.py | 56 ++ flagai/model/vision/layers/cond_conv2d.py | 123 +++++ flagai/model/vision/layers/config.py | 115 ++++ flagai/model/vision/layers/conv2d_same.py | 42 ++ flagai/model/vision/layers/conv_bn_act.py | 73 +++ flagai/model/vision/layers/create_act.py | 148 +++++ flagai/model/vision/layers/create_attn.py | 89 +++ flagai/model/vision/layers/create_conv2d.py | 36 ++ flagai/model/vision/layers/create_norm_act.py | 88 +++ flagai/model/vision/layers/drop.py | 166 ++++++ flagai/model/vision/layers/eca.py | 145 +++++ flagai/model/vision/layers/evo_norm.py | 350 ++++++++++++ .../vision/layers/filter_response_norm.py | 68 +++ flagai/model/vision/layers/gather_excite.py | 90 ++++ flagai/model/vision/layers/global_context.py | 67 +++ flagai/model/vision/layers/halo_attn.py | 233 ++++++++ flagai/model/vision/layers/helpers.py | 31 ++ flagai/model/vision/layers/inplace_abn.py | 87 +++ flagai/model/vision/layers/lambda_layer.py | 133 +++++ flagai/model/vision/layers/linear.py | 19 + flagai/model/vision/layers/median_pool.py | 49 ++ flagai/model/vision/layers/mixed_conv2d.py | 51 ++ flagai/model/vision/layers/ml_decoder.py | 156 ++++++ flagai/model/vision/layers/mlp.py | 126 +++++ flagai/model/vision/layers/non_local_attn.py | 145 +++++ flagai/model/vision/layers/norm.py | 24 + flagai/model/vision/layers/norm_act.py | 151 ++++++ flagai/model/vision/layers/padding.py | 56 ++ flagai/model/vision/layers/patch_embed.py | 40 ++ flagai/model/vision/layers/pool2d_same.py | 73 +++ flagai/model/vision/layers/pos_embed.py | 207 +++++++ .../model/vision/layers/selective_kernel.py | 119 +++++ flagai/model/vision/layers/separable_conv.py | 76 +++ flagai/model/vision/layers/space_to_depth.py | 53 ++ flagai/model/vision/layers/split_attn.py | 84 +++ flagai/model/vision/layers/split_batchnorm.py | 75 +++ flagai/model/vision/layers/squeeze_excite.py | 74 +++ flagai/model/vision/layers/std_conv.py | 133 +++++ flagai/model/vision/layers/test_time_pool.py | 52 ++ flagai/model/vision/layers/trace_utils.py | 13 + flagai/model/vision/layers/weight_init.py | 89 +++ flagai/model/vision/vit.py | 505 ++++++++++++++++++ flagai/trainer.py | 199 ++++++- flagai/utils.py | 4 +- ...s.out.tfevents.1657856025.deepspeed.4989.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856036.deepspeed.4996.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856036.deepspeed.4997.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856036.deepspeed.4998.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856036.deepspeed.4999.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856036.deepspeed.5001.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856036.deepspeed.5003.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856037.deepspeed.5000.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657856037.deepspeed.5002.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860119.deepspeed.5045.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5052.0 | Bin 0 -> 
40 bytes ...s.out.tfevents.1657860131.deepspeed.5053.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5054.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5055.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5056.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5057.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5058.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860131.deepspeed.5059.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657860187.deepspeed.5102.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862289.deepspeed.5186.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862349.deepspeed.5266.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862360.deepspeed.5273.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862360.deepspeed.5274.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862360.deepspeed.5276.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862360.deepspeed.5277.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862361.deepspeed.5275.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862361.deepspeed.5278.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862361.deepspeed.5279.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862361.deepspeed.5280.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862493.deepspeed.5347.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5354.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5355.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5356.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5357.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5358.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5359.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862504.deepspeed.5360.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862505.deepspeed.5361.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862544.deepspeed.5395.0 | Bin 0 -> 40 bytes ...s.out.tfevents.1657862555.deepspeed.5402.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5403.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5404.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5405.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5406.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5407.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5408.0 | Bin 0 -> 4880 bytes ...s.out.tfevents.1657862555.deepspeed.5409.0 | Bin 0 -> 4880 bytes 110 files changed, 6405 insertions(+), 66 deletions(-) create mode 100644 examples/vit_cifar100/README.md create mode 100644 examples/vit_cifar100/deepspeed.json create mode 100644 examples/vit_cifar100/hostfile create mode 100644 examples/vit_cifar100/train_DDP.py create mode 100644 examples/vit_cifar100/train_deepspeed.py create mode 100644 examples/vit_cifar100/train_single_gpu.py create mode 100644 examples/vit_cifar100/validate.py create mode 100755 flagai/model/vision/layers/__init__.py create mode 100755 flagai/model/vision/layers/activations.py create mode 100755 flagai/model/vision/layers/activations_jit.py create mode 100755 flagai/model/vision/layers/activations_me.py create mode 100755 flagai/model/vision/layers/adaptive_avgmax_pool.py create mode 100755 flagai/model/vision/layers/attention_pool2d.py create mode 100755 flagai/model/vision/layers/blur_pool.py create mode 100755 flagai/model/vision/layers/bottleneck_attn.py create mode 100755 flagai/model/vision/layers/cbam.py create mode 100755 flagai/model/vision/layers/classifier.py create mode 100755 flagai/model/vision/layers/cond_conv2d.py create mode 100755 
flagai/model/vision/layers/config.py create mode 100755 flagai/model/vision/layers/conv2d_same.py create mode 100755 flagai/model/vision/layers/conv_bn_act.py create mode 100755 flagai/model/vision/layers/create_act.py create mode 100755 flagai/model/vision/layers/create_attn.py create mode 100755 flagai/model/vision/layers/create_conv2d.py create mode 100755 flagai/model/vision/layers/create_norm_act.py create mode 100755 flagai/model/vision/layers/drop.py create mode 100755 flagai/model/vision/layers/eca.py create mode 100755 flagai/model/vision/layers/evo_norm.py create mode 100755 flagai/model/vision/layers/filter_response_norm.py create mode 100755 flagai/model/vision/layers/gather_excite.py create mode 100755 flagai/model/vision/layers/global_context.py create mode 100755 flagai/model/vision/layers/halo_attn.py create mode 100755 flagai/model/vision/layers/helpers.py create mode 100755 flagai/model/vision/layers/inplace_abn.py create mode 100755 flagai/model/vision/layers/lambda_layer.py create mode 100755 flagai/model/vision/layers/linear.py create mode 100755 flagai/model/vision/layers/median_pool.py create mode 100755 flagai/model/vision/layers/mixed_conv2d.py create mode 100755 flagai/model/vision/layers/ml_decoder.py create mode 100755 flagai/model/vision/layers/mlp.py create mode 100755 flagai/model/vision/layers/non_local_attn.py create mode 100755 flagai/model/vision/layers/norm.py create mode 100755 flagai/model/vision/layers/norm_act.py create mode 100755 flagai/model/vision/layers/padding.py create mode 100755 flagai/model/vision/layers/patch_embed.py create mode 100755 flagai/model/vision/layers/pool2d_same.py create mode 100755 flagai/model/vision/layers/pos_embed.py create mode 100755 flagai/model/vision/layers/selective_kernel.py create mode 100755 flagai/model/vision/layers/separable_conv.py create mode 100755 flagai/model/vision/layers/space_to_depth.py create mode 100755 flagai/model/vision/layers/split_attn.py create mode 100755 flagai/model/vision/layers/split_batchnorm.py create mode 100755 flagai/model/vision/layers/squeeze_excite.py create mode 100755 flagai/model/vision/layers/std_conv.py create mode 100755 flagai/model/vision/layers/test_time_pool.py create mode 100755 flagai/model/vision/layers/trace_utils.py create mode 100755 flagai/model/vision/layers/weight_init.py create mode 100644 flagai/model/vision/vit.py create mode 100644 vit-cifar100/events.out.tfevents.1657856025.deepspeed.4989.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4996.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4997.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4998.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4999.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.5001.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.5003.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856037.deepspeed.5000.0 create mode 100644 vit-cifar100/events.out.tfevents.1657856037.deepspeed.5002.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860119.deepspeed.5045.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5052.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5053.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5054.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5055.0 create mode 100644 
vit-cifar100/events.out.tfevents.1657860131.deepspeed.5056.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5057.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5058.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5059.0 create mode 100644 vit-cifar100/events.out.tfevents.1657860187.deepspeed.5102.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862289.deepspeed.5186.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862349.deepspeed.5266.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5273.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5274.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5276.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5277.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5275.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5278.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5279.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5280.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862493.deepspeed.5347.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5354.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5355.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5356.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5357.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5358.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5359.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5360.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862505.deepspeed.5361.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862544.deepspeed.5395.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5402.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5403.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5404.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5405.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5406.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5407.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5408.0 create mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5409.0 diff --git a/examples/vit_cifar100/README.md b/examples/vit_cifar100/README.md new file mode 100644 index 00000000..1cd35edd --- /dev/null +++ b/examples/vit_cifar100/README.md @@ -0,0 +1,163 @@ +# ViT for classification on the CIFAR-100 dataset + +Vision Transformer (ViT) is becoming increasingly popular in the field of +computer vision (CV). More and more tasks use ViT to achieve state-of-the-art (SOTA) results. + +The paper is available at https://arxiv.org/pdf/2010.11929.pdf. + +The original code is at https://github.com/google-research/vision_transformer. + +## How to use +We can easily use ViT to finetune on the CIFAR-100 dataset.
+### Training +```python +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +trainer = Trainer( + env_type="pytorch", + experiment_name="vit-cifar100", + batch_size=64, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100", + save_interval=1000, + num_checkpoints=1, +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + + +if __name__ == '__main__': + loader = AutoLoader(task_name="backbone", + model_name="Vit-base-p16", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + + train_dataset, val_dataset = build_cifar() + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) +``` + +### Validation +If you have trained a model, you can validate it again with the following code.
+```python +import torch +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.auto_model.auto_loader import AutoLoader +import os +from tqdm import tqdm + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def build_cifar(): + + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + + model_save_dir = "./checkpoints_vit_cifar100" + print(f"loadding model in :{model_save_dir}") + loader = AutoLoader(task_name="backbone", + model_name="Vit-base-p16", + num_classes=100) + + model = loader.get_model() + + model.load_state_dict(torch.load(os.path.join(model_save_dir, "38000", "pytorch_model.bin"), map_location=device)["module"]) + print(f"model load success.......") + model.to(device) + + val_dataset = build_cifar() + + val_dataloader = DataLoader(val_dataset, + batch_size=1, + shuffle=False, + collate_fn=collate_fn) + index = 0 + accuracy = 0.0 + for data in tqdm(val_dataloader, total=len(val_dataloader)): + index += 1 + data = {k: v.to(device) for k, v in data.items()} + labels = data["labels"] + pred = model(**data)["logits"] + acc = validate(pred, labels) + accuracy += acc + + print(f"accuracy is {accuracy / index}") +``` diff --git a/examples/vit_cifar100/deepspeed.json b/examples/vit_cifar100/deepspeed.json new file mode 100644 index 00000000..f2339ca3 --- /dev/null +++ b/examples/vit_cifar100/deepspeed.json @@ -0,0 +1,48 @@ +{ + "train_micro_batch_size_per_gpu": 64, + "gradient_accumulation_steps": 1, + "steps_per_print": 100, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 2, + "contiguous_gradients": false, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 5e7, + "allgather_bucket_size": 5e7, + "cpu_offload": true + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 1e-5, + "warmup_num_steps": 2000 + } + }, + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "Adam", + "params": { + "lr": 1e-5, + "weight_decay": 0.1, + "betas": [ + 0.9, + 0.98 + ], + "eps": 1e-6 + } + }, + "activation_checkpointing": { + "partition_activations": true, + "contiguous_memory_optimization": false + }, + "wall_clock_breakdown": false + } diff --git a/examples/vit_cifar100/hostfile b/examples/vit_cifar100/hostfile new file mode 100644 index 00000000..51356577 --- /dev/null +++ b/examples/vit_cifar100/hostfile @@ -0,0 +1 @@ +127.0.0.1 slots=2 \ No newline at end of file diff --git a/examples/vit_cifar100/train_DDP.py b/examples/vit_cifar100/train_DDP.py new file mode 100644 index 00000000..06f5cd1a --- /dev/null +++ b/examples/vit_cifar100/train_DDP.py @@ -0,0 +1,86 @@ +import torch +from 
torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "pytorchDDP" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-8gpu", + batch_size=150, + num_gpus=8, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_8gpu", + save_interval=1000, + num_checkpoints=1, + hostfile="./hostfile", +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/examples/vit_cifar100/train_deepspeed.py b/examples/vit_cifar100/train_deepspeed.py new file mode 100644 index 00000000..27d46628 --- /dev/null +++ b/examples/vit_cifar100/train_deepspeed.py @@ -0,0 +1,87 @@ +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "deepspeed" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-deepspeed", + batch_size=150, + num_gpus=8, + fp16=True, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_deepspeed", + save_interval=1000, + num_checkpoints=1, + hostfile="./hostfile", +) + +def build_cifar(): + transform_train = transforms.Compose([ + 
transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/examples/vit_cifar100/train_single_gpu.py b/examples/vit_cifar100/train_single_gpu.py new file mode 100644 index 00000000..ef7e1356 --- /dev/null +++ b/examples/vit_cifar100/train_single_gpu.py @@ -0,0 +1,85 @@ +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "pytorch" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-single_gpu", + batch_size=150, + num_gpus=1, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_single_gpu", + save_interval=1000, + num_checkpoints=1, +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, 
"labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/examples/vit_cifar100/validate.py b/examples/vit_cifar100/validate.py new file mode 100644 index 00000000..e52eb113 --- /dev/null +++ b/examples/vit_cifar100/validate.py @@ -0,0 +1,76 @@ +import torch +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.auto_model.auto_loader import AutoLoader +import os +from tqdm import tqdm + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def build_cifar(): + + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + + model_save_dir = "./checkpoints_vit_cifar100" + print(f"loadding model in :{model_save_dir}") + loader = AutoLoader(task_name="backbone", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + + model.load_state_dict(torch.load(os.path.join(model_save_dir, "38000", "pytorch_model.bin"), map_location=device)["module"]) + print(f"model load success.......") + model.to(device) + + val_dataset = build_cifar() + + val_dataloader = DataLoader(val_dataset, + batch_size=1, + shuffle=False, + collate_fn=collate_fn) + index = 0 + accuracy = 0.0 + for data in tqdm(val_dataloader, total=len(val_dataloader)): + index += 1 + data = {k: v.to(device) for k, v in data.items()} + labels = data["labels"] + pred = model(**data)["logits"] + acc = validate(pred, labels) + accuracy += acc + + print(f"accuracy is {accuracy / index}") + + + + + + + + + + diff --git a/flagai/auto_model/auto_loader.py b/flagai/auto_model/auto_loader.py index 37d7ca90..22e2273b 100644 --- a/flagai/auto_model/auto_loader.py +++ b/flagai/auto_model/auto_loader.py @@ -54,26 +54,37 @@ def __getattr__(self, name): "glm_title-generation": ["flagai.model.glm_model", "GLMForSeq2Seq"], "opt_seq2seq": ("flagai.model.opt_model","OPTModel"), "opt_lm": ("flagai.model.opt_model","OPTModel"), + "vit_classification": ("flagai.model.vision.vit", "VisionTransformer") + } MODEL_DICT = { - "bert-base-en": ["flagai.model.bert_model", "BertModel", "bert"], - "roberta-base-ch": ["flagai.model.bert_model", "BertModel", "bert"], - 
"t5-base-en": ["flagai.model.t5_model", "T5Model", "t5"], - "t5-base-ch": ["flagai.model.t5_model", "T5Model", "t5"], - "glm-large-ch": ["flagai.model.glm_model", "GLMModel", "glm"], - "glm-large-en": ["flagai.model.glm_model", "GLMModel", "glm"], - "gpt2-base-ch": ["flagai.model.gpt2_model", "GPT2Model", "gpt2"], - "cpm-large-ch": ["flagai.model.gpt2_model", "GPT2Model", "cpm"], - "opt-125m-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-350m-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-1.3b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-2.7b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-6.7b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-13b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-30b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-66b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "glm-10b-ch": ["flagai.model.glm_model", "GLMModel", "glm"], + "bert-base-en": ["flagai.model.bert_model", "BertModel", "bert", "nlp"], + "roberta-base-ch": ["flagai.model.bert_model", "BertModel", "bert", "nlp"], + "t5-base-en": ["flagai.model.t5_model", "T5Model", "t5", "nlp"], + "t5-base-ch": ["flagai.model.t5_model", "T5Model", "t5", "nlp"], + "glm-large-ch": ["flagai.model.glm_model", "GLMModel", "glm", "nlp"], + "glm-large-en": ["flagai.model.glm_model", "GLMModel", "glm", "nlp"], + "gpt2-base-ch": ["flagai.model.gpt2_model", "GPT2Model", "gpt2", "nlp"], + "cpm-large-ch": ["flagai.model.gpt2_model", "GPT2Model", "cpm", "nlp"], + "opt-125m-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-350m-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-1.3b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-2.7b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-6.7b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-13b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-30b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-66b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "glm-10b-ch": ["flagai.model.glm_model", "GLMModel", "glm", "nlp"], + + "vit-base-p16-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-base-p16-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-base-p32-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-base-p32-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p16-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p16-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p32-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p32-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], } TOKENIZER_DICT = { @@ -106,7 +117,6 @@ def __getattr__(self, name): } - class AutoLoader: def __init__(self, @@ -153,6 +163,8 @@ def __init__(self, return brief_model_name = MODEL_DICT[model_name][2] + model_type = MODEL_DICT[model_name][3] + # The dir to save config, vocab and model. 
self.model_name = ALL_TASK.get(f"{brief_model_name}_{task_name}", None) @@ -184,38 +196,41 @@ def __init__(self, model_id = _get_model_id(model_name) print("*"*20, task_name, model_id, model_name) - if "glm" in model_name and "ch" in model_name: - vocab_file = os.path.join(download_path,'cog-pretrained.model') - if not os.path.exists(vocab_file): - vocab_file = _get_vocab_path(download_path, "cog-pretrain.model", model_id) - elif "glm" in model_name and "en" in model_name: - vocab_file = "GLM-large-en" - elif model_name == "cpm-large-ch": - # two files to load - vocab_file_1 = os.path.join(download_path, "vocab.json") - vocab_file_2 = os.path.join(download_path, "chinese_vocab.model") - if not os.path.exists(vocab_file_1): - vocab_file_1 = _get_vocab_path(download_path, "vocab.json", - model_id) - if not os.path.exists(vocab_file_2): - vocab_file_2 = _get_vocab_path(download_path, - "chinese_vocab.model", model_id) - else: - vocab_file = os.path.join(download_path, 'vocab.txt') - if not os.path.exists(vocab_file): - vocab_file = _get_vocab_path(download_path, "vocab.txt", - model_id) - tokenizer_class = TOKENIZER_DICT[model_name] - tokenizer_class = getattr(LazyImport(tokenizer_class[0]), - tokenizer_class[1]) - if model_name == "cpm-large-ch": - self.tokenizer = tokenizer_class(vocab_file_1, vocab_file_2) - elif brief_model_name == "opt": - self.tokenizer = tokenizer_class("facebook/opt-350m") - elif model_name in ["glm-large-en", "glm-large-ch"]: - self.tokenizer = tokenizer_class() - else : - self.tokenizer = tokenizer_class(vocab_file) + if model_type == "nlp": + if "glm" in model_name and "ch" in model_name: + vocab_file = os.path.join(download_path,'cog-pretrained.model') + if not os.path.exists(vocab_file): + vocab_file = _get_vocab_path(download_path, "cog-pretrain.model", model_id) + elif "glm" in model_name and "en" in model_name: + vocab_file = "GLM-large-en" + elif model_name == "cpm-large-ch": + # two files to load + vocab_file_1 = os.path.join(download_path, "vocab.json") + vocab_file_2 = os.path.join(download_path, "chinese_vocab.model") + if not os.path.exists(vocab_file_1): + vocab_file_1 = _get_vocab_path(download_path, "vocab.json", + model_id) + if not os.path.exists(vocab_file_2): + vocab_file_2 = _get_vocab_path(download_path, + "chinese_vocab.model", model_id) + else: + vocab_file = os.path.join(download_path, 'vocab.txt') + if not os.path.exists(vocab_file): + vocab_file = _get_vocab_path(download_path, "vocab.txt", + model_id) + tokenizer_class = TOKENIZER_DICT[model_name] + tokenizer_class = getattr(LazyImport(tokenizer_class[0]), + tokenizer_class[1]) + if model_name == "cpm-large-ch": + self.tokenizer = tokenizer_class(vocab_file_1, vocab_file_2) + elif brief_model_name == "opt": + self.tokenizer = tokenizer_class("facebook/opt-350m") + elif model_name in ["glm-large-en", "glm-large-ch"]: + self.tokenizer = tokenizer_class() + else : + self.tokenizer = tokenizer_class(vocab_file) + elif model_type == "vision": + self.tokenizer = None def get_task_name(self, brief_model_name): all_model_task = list(ALL_TASK.keys()) diff --git a/flagai/launch.py b/flagai/launch.py index 3dcfe22b..ecba3254 100644 --- a/flagai/launch.py +++ b/flagai/launch.py @@ -74,7 +74,8 @@ def launch_dist(launcher='distributed_deepspeed', hostfile='hostfile', nccl_info=False, training_script='train.py', - training_script_paras=None): + training_script_paras=None, + training_paras=None,): try: resource_pool = fetch_hostfile(hostfile) except: @@ -151,6 +152,9 @@ def 
launch_dist(launcher='distributed_deepspeed', ] cmd_launch.extend(torch_distributed_args) cmd_launch.append(training_script) + if training_paras: + cmd_launch.extend(training_paras) + cmd_launch.append('--not_call_launch') run_cmd = ' '.join(cmd_launch) log_dist(run_cmd) @@ -196,6 +200,9 @@ def launch_dist(launcher='distributed_deepspeed', if len(training_script_paras) > 0: cmd_launch.extend(training_script_paras) + if training_paras: + cmd_launch.extend(training_paras) + cmd_launch.append('--not_call_launch') run_cmd = ' '.join(cmd_launch) log_dist(run_cmd) @@ -226,6 +233,9 @@ def launch_dist(launcher='distributed_deepspeed', if len(training_script_paras) > 0: cmd_launch.extend(training_script_paras) + if training_paras: + cmd_launch.extend(training_paras) + run_cmd = ' '.join(cmd_launch) log_dist(run_cmd) subprocess.Popen(run_cmd, shell=True) diff --git a/flagai/model/base_model.py b/flagai/model/base_model.py index 5480b73b..9ea2d594 100644 --- a/flagai/model/base_model.py +++ b/flagai/model/base_model.py @@ -8,7 +8,7 @@ from typing import Union from flagai.model.file_utils import _get_model_id, _get_config_path, _get_checkpoint_path, _get_vocab_path, _get_model_files import os - +from glob import glob # The base model for models class BaseModel(Module): @@ -59,12 +59,34 @@ def from_pretrain(cls, # downloading the files model: Union[Module, None] if model_id and model_id != "null": + model_files = eval(_get_model_files(model_name)) if not os.path.exists(os.path.join(download_path, 'vocab.txt')): - _get_vocab_path(download_path, "vocab.txt", model_id) + if "vocab.txt" in model_files: + _get_vocab_path(download_path, "vocab.txt", model_id) if not only_download_config and not os.path.exists(os.path.join(download_path, 'config.json')): - model_files = eval(_get_model_files(model_name)) - if 'pytorch_model.bin' in model_files: + if os.getenv('ENV_TYPE') == 'deepspeed+mpu': + model_parallel_size = int(os.getenv("MODEL_PARALLEL_SIZE")) + if model_parallel_size > 1: + # if gpus == nums_of_modelhub_models + # can load + # else need to download the pytorch_model.bin and to recut. + model_hub_parallel_size = 0 + for f in model_files: + if "pytorch_model_" in f: + model_hub_parallel_size += 1 + else: + model_parallel_size = 1 + + if "pytorch_model_01.bin" in model_files and model_parallel_size > 1 and model_hub_parallel_size == model_parallel_size: + # Only to download the model slices(megatron-lm). 
+ for file_to_load in model_files: + if "pytorch_model_" in file_to_load: + _get_checkpoint_path(download_path, + file_to_load, + model_id) + + elif 'pytorch_model.bin' in model_files: checkpoint_path = _get_checkpoint_path(download_path, 'pytorch_model.bin', model_id) diff --git a/flagai/model/vision/layers/__init__.py b/flagai/model/vision/layers/__init__.py new file mode 100755 index 00000000..7e9e7b19 --- /dev/null +++ b/flagai/model/vision/layers/__init__.py @@ -0,0 +1,42 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .blur_pool import BlurPool2d +from .classifier import ClassifierHead, create_classifier +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer +from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ + EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a +from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible +from .inplace_abn import InplaceAbn +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp, ConvMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, LayerNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .padding import get_padding, get_same_padding, pad_same +from .patch_embed import PatchEmbed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvNormAct +from .space_to_depth import SpaceToDepthModule +from .split_attn import SplitAttn +from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .trace_utils import _assert, _float_to_int +from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_ diff --git a/flagai/model/vision/layers/activations.py b/flagai/model/vision/layers/activations.py new file mode 100755 index 00000000..e16b3bd3 --- /dev/null +++ b/flagai/model/vision/layers/activations.py @@ -0,0 +1,145 @@ +""" Activations + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +def swish(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def mish(x, inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + NOTE: I don't have a working inplace variant + """ + return x.mul(F.softplus(x).tanh()) + + +class Mish(nn.Module): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + def __init__(self, inplace: bool = False): + super(Mish, self).__init__() + + def forward(self, x): + return mish(x) + + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + + +def tanh(x, inplace: bool = False): + return x.tanh_() if inplace else x.tanh() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Tanh(nn.Module): + def __init__(self, inplace: bool = False): + super(Tanh, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.tanh_() if self.inplace else x.tanh() + + +def hard_swish(x, inplace: bool = False): + inner = F.relu6(x + 3.).div_(6.) + return x.mul_(inner) if inplace else x.mul(inner) + + +class HardSwish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_swish(x, self.inplace) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. 
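+
+
+# A minimal, illustrative self-check (sketch): the hand-rolled hard_swish / hard_sigmoid
+# above should match PyTorch's native ops, assuming a build that ships F.hardswish and
+# F.hardsigmoid (both are treated as optional elsewhere, see create_act.py).
+if __name__ == "__main__":
+    _x = torch.randn(4, 8)
+    assert torch.allclose(hard_swish(_x), F.hardswish(_x))
+    assert torch.allclose(hard_sigmoid(_x), F.hardsigmoid(_x))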
+ + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + +def hard_mish(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + if inplace: + return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) + else: + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_mish(x, self.inplace) + + +class PReLU(nn.PReLU): + """Applies PReLU (w/ dummy inplace arg) + """ + def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/flagai/model/vision/layers/activations_jit.py b/flagai/model/vision/layers/activations_jit.py new file mode 100755 index 00000000..b4a51653 --- /dev/null +++ b/flagai/model/vision/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) / 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 
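+
+
+# A minimal, illustrative self-check (sketch): the @torch.jit.script functions above are
+# compiled at import time but are called like plain functions; assuming a PyTorch build
+# with native F.hardswish / F.hardsigmoid, the scripted variants should match them.
+if __name__ == "__main__":
+    _x = torch.randn(4, 8)
+    assert torch.allclose(hard_swish_jit(_x), F.hardswish(_x))
+    assert torch.allclose(hard_sigmoid_jit(_x), F.hardsigmoid(_x))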
+ + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) + + +@torch.jit.script +def hard_mish_jit(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishJit, self).__init__() + + def forward(self, x): + return hard_mish_jit(x) diff --git a/flagai/model/vision/layers/activations_me.py b/flagai/model/vision/layers/activations_me.py new file mode 100755 index 00000000..9a12bb7e --- /dev/null +++ b/flagai/model/vision/layers/activations_me.py @@ -0,0 +1,218 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model, please use either +the JIT or basic versions of the activations. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Pazske + https://twitter.com/jeremyphoward/status/1188251041835315200 + """ + @staticmethod + def symbolic(g, x): + return g.op("Mul", x, g.op("Sigmoid", x)) + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.) 
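+
+
+# A minimal, illustrative gradient check (sketch): the memory-efficient autograd Functions
+# recompute the activation gradient from the saved input, so their analytic backward should
+# agree with autograd's numerical estimate (gradcheck expects double-precision inputs).
+if __name__ == "__main__":
+    _x = torch.randn(8, dtype=torch.double, requires_grad=True)
+    assert torch.autograd.gradcheck(SwishJitAutoFn.apply, (_x,))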
+ + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + @staticmethod + def symbolic(g, self): + input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) + hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + return g.op("Mul", self, hardtanh_) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_mish_jit_fwd(x): + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +@torch.jit.script +def hard_mish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= -2.) + m = torch.where((x >= -2.) 
& (x <= 0.), x + 1., m) + return grad_output * m + + +class HardMishJitAutoFn(torch.autograd.Function): + """ A memory efficient, jit scripted variant of Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_mish_jit_bwd(x, grad_output) + + +def hard_mish_me(x, inplace: bool = False): + return HardMishJitAutoFn.apply(x) + + +class HardMishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishMe, self).__init__() + + def forward(self, x): + return HardMishJitAutoFn.apply(x) + + + diff --git a/flagai/model/vision/layers/adaptive_avgmax_pool.py b/flagai/model/vision/layers/adaptive_avgmax_pool.py new file mode 100755 index 00000000..ebc6ada8 --- /dev/null +++ b/flagai/model/vision/layers/adaptive_avgmax_pool.py @@ -0,0 +1,118 @@ +""" PyTorch selectable adaptive pooling +Adaptive pooling with the ability to select the type of pooling from: + * 'avg' - Average pooling + * 'max' - Max pooling + * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 + * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim + +Both a functional and a nn.Module version of the pooling is provided. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class FastAdaptiveAvgPool2d(nn.Module): + def __init__(self, flatten=False): + super(FastAdaptiveAvgPool2d, self).__init__() + self.flatten = flatten + + def forward(self, x): + return x.mean((2, 3), keepdim=not self.flatten) + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='fast', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + 
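+        # pool_type: '' (pass-through), 'fast', 'avg', 'avgmax', 'catavgmax' or 'max'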
self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing + self.flatten = nn.Flatten(1) if flatten else nn.Identity() + if pool_type == '': + self.pool = nn.Identity() # pass through + elif pool_type == 'fast': + assert output_size == 1 + self.pool = FastAdaptiveAvgPool2d(flatten) + self.flatten = nn.Identity() + elif pool_type == 'avg': + self.pool = nn.AdaptiveAvgPool2d(output_size) + elif pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + + def is_identity(self): + return not self.pool_type + + def forward(self, x): + x = self.pool(x) + x = self.flatten(x) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'pool_type=' + self.pool_type \ + + ', flatten=' + str(self.flatten) + ')' + diff --git a/flagai/model/vision/layers/attention_pool2d.py b/flagai/model/vision/layers/attention_pool2d.py new file mode 100755 index 00000000..a13a6881 --- /dev/null +++ b/flagai/model/vision/layers/attention_pool2d.py @@ -0,0 +1,131 @@ +""" Attention Pool 2D + +Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. + +Based on idea in CLIP by OpenAI, licensed Apache 2.0 +https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Union, Tuple + +import torch +import torch.nn as nn + +from .helpers import to_2tuple +from .pos_embed import apply_rot_embed, RotaryEmbedding +from .weight_init import trunc_normal_ + + +class RotAttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from + train varies widely and falls off dramatically. I'm not sure if there is a way around this... 
-RW + """ + def __init__( + self, + in_features: int, + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + embed_dim = embed_dim or in_features + out_features = out_features or in_features + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + assert embed_dim % num_heads == 0 + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + self.pos_embed = RotaryEmbedding(self.head_dim) + + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + x = x.reshape(B, -1, N).permute(0, 2, 1) + + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + + qc, q = q[:, :, :1], q[:, :, 1:] + sin_emb, cos_emb = self.pos_embed.get_embed((H, W)) + q = apply_rot_embed(q, sin_emb, cos_emb) + q = torch.cat([qc, q], dim=2) + + kc, k = k[:, :, :1], k[:, :, 1:] + k = apply_rot_embed(k, sin_emb, cos_emb) + k = torch.cat([kc, k], dim=2) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] + + +class AttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + It was based on impl in CLIP by OpenAI + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. 
+ """ + def __init__( + self, + in_features: int, + feat_size: Union[int, Tuple[int, int]], + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.feat_size = to_2tuple(feat_size) + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + + spatial_dim = self.feat_size[0] * self.feat_size[1] + self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) + trunc_normal_(self.pos_embed, std=in_features ** -0.5) + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + assert self.feat_size[0] == H + assert self.feat_size[1] == W + x = x.reshape(B, -1, N).permute(0, 2, 1) + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + x = x + self.pos_embed.unsqueeze(0).to(x.dtype) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] diff --git a/flagai/model/vision/layers/blur_pool.py b/flagai/model/vision/layers/blur_pool.py new file mode 100755 index 00000000..e73d8863 --- /dev/null +++ b/flagai/model/vision/layers/blur_pool.py @@ -0,0 +1,42 @@ +""" +BlurPool layer inspired by + - Kornia's Max_BlurPool2d + - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` + +Hacked together by Chris Ha and Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from .padding import get_padding + + +class BlurPool2d(nn.Module): + r"""Creates a module that computes blurs and downsample a given feature map. + See :cite:`zhang2019shiftinvar` for more details. + Corresponds to the Downsample class, which does blurring and subsampling + + Args: + channels = Number of input channels + filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. + stride (int): downsampling filter stride + + Returns: + torch.Tensor: the transformed tensor. 
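+
+    Note:
+        The blur kernel is a normalised binomial filter applied depthwise (one filter per channel).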
+ """ + def __init__(self, channels, filt_size=3, stride=2) -> None: + super(BlurPool2d, self).__init__() + assert filt_size > 1 + self.channels = channels + self.filt_size = filt_size + self.stride = stride + self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 + coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) + blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) + self.register_buffer('filt', blur_filter, persistent=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.pad(x, self.padding, 'reflect') + return F.conv2d(x, self.filt, stride=self.stride, groups=self.channels) diff --git a/flagai/model/vision/layers/bottleneck_attn.py b/flagai/model/vision/layers/bottleneck_attn.py new file mode 100755 index 00000000..c3db464e --- /dev/null +++ b/flagai/model/vision/layers/bottleneck_attn.py @@ -0,0 +1,157 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(B, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. 
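+        # (the same 1d helper is reused by swapping the H and W axes of q; the permute
+        #  mask restores the original axis ordering afterwards)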
+ q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). + num_heads (int): parallel attention heads (default: 4) + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, + qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + + self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.pos_embed.height, '') + _assert(W == self.pos_embed.width, '') + + x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W + + # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v + # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. 
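+        # shapes after the reshapes below: q -> (B * num_heads, H * W, dim_head_qk),
+        # k -> (B * num_heads, dim_head_qk, H * W), v -> (B * num_heads, H * W, dim_head_v)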
+ q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) + k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k + v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) + + if self.scale_pos_embed: + attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W + else: + attn = (q @ k) * self.scale + self.pos_embed(q) + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W + out = self.pool(out) + return out diff --git a/flagai/model/vision/layers/cbam.py b/flagai/model/vision/layers/cbam.py new file mode 100755 index 00000000..576a8306 --- /dev/null +++ b/flagai/model/vision/layers/cbam.py @@ -0,0 +1,112 @@ +""" CBAM (sort-of) Attention + +Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 + +WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on +some tasks, especially fine-grained it seems. I may end up removing this impl. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .conv_bn_act import ConvNormAct +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible + + +class ChannelAttn(nn.Module): + """ Original CBAM channel attention module, currently avg + max pool variant only. + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(ChannelAttn, self).__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) + self.act = act_layer(inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) + x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) + return x * self.gate(x_avg + x_max) + + +class LightChannelAttn(ChannelAttn): + """An experimental 'lightweight' that sums avg + max pool first + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightChannelAttn, self).__init__( + channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) + + def forward(self, x): + x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) + x_attn = self.fc2(self.act(self.fc1(x_pool))) + return x * F.sigmoid(x_attn) + + +class SpatialAttn(nn.Module): + """ Original CBAM spatial attention module + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(SpatialAttn, self).__init__() + self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class LightSpatialAttn(nn.Module): + """An experimental 'lightweight' variant that sums avg_pool and max_pool results. 
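+    (the avg and max maps are averaged into a single channel before the conv,
+    unlike SpatialAttn above, which concatenates both maps)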
+ """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(LightSpatialAttn, self).__init__() + self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class CbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(CbamModule, self).__init__() + self.channel = ChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + + +class LightCbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightCbamModule, self).__init__() + self.channel = LightChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = LightSpatialAttn(spatial_kernel_size) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + diff --git a/flagai/model/vision/layers/classifier.py b/flagai/model/vision/layers/classifier.py new file mode 100755 index 00000000..3ac33387 --- /dev/null +++ b/flagai/model/vision/layers/classifier.py @@ -0,0 +1,56 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + fc = nn.Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) + self.fc = _create_fc(num_pooled_features, num_classes, 
use_conv=use_conv) + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + if pre_logits: + return x.flatten(1) + else: + x = self.fc(x) + return self.flatten(x) diff --git a/flagai/model/vision/layers/cond_conv2d.py b/flagai/model/vision/layers/cond_conv2d.py new file mode 100755 index 00000000..43654c59 --- /dev/null +++ b/flagai/model/vision/layers/cond_conv2d.py @@ -0,0 +1,123 @@ +""" PyTorch Conditionally Parameterized Convolution (CondConv) + +Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference +(https://arxiv.org/abs/1904.04971) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +from functools import partial +import numpy as np +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .helpers import to_2tuple +from .conv2d_same import conv2d_same +from .padding import get_padding_value + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = to_2tuple(padding_val) + self.dilation = to_2tuple(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + 
init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + # reshape instead of view to work with channels_last input + x = x.reshape(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out diff --git a/flagai/model/vision/layers/config.py b/flagai/model/vision/layers/config.py new file mode 100755 index 00000000..f07b9d78 --- /dev/null +++ b/flagai/model/vision/layers/config.py @@ -0,0 +1,115 @@ +""" Model / Layer Config singleton state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. 
+_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. + """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/flagai/model/vision/layers/conv2d_same.py b/flagai/model/vision/layers/conv2d_same.py new file mode 100755 index 00000000..75f0f98d --- /dev/null +++ b/flagai/model/vision/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + 
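+    # a string padding ('', 'same', 'valid') is resolved by get_padding_value below;
+    # when TF-style 'SAME' padding must be computed at runtime, the Conv2dSame wrapper
+    # is returned instead of a plain nn.Conv2d with a fixed symmetric pad.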
kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/flagai/model/vision/layers/conv_bn_act.py b/flagai/model/vision/layers/conv_bn_act.py new file mode 100755 index 00000000..af010573 --- /dev/null +++ b/flagai/model/vision/layers/conv_bn_act.py @@ -0,0 +1,73 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer + + +class ConvNormAct(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, drop_layer=None): + super(ConvNormAct, self).__init__() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +ConvBnAct = ConvNormAct + + +class ConvNormActAa(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, drop_layer=None): + super(ConvNormActAa, self).__init__() + use_aa = aa_layer is not None + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else nn.Identity() + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.aa(x) + return x diff --git a/flagai/model/vision/layers/create_act.py b/flagai/model/vision/layers/create_act.py new file mode 100755 index 00000000..e38f2e03 --- /dev/null +++ b/flagai/model/vision/layers/create_act.py @@ -0,0 +1,148 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 
1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. +_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
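+
+    e.g. ``get_act_fn('hard_swish')`` returns the native ``F.hardswish`` on recent
+    PyTorch builds and otherwise falls back to the custom implementations above.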
+ """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if not isinstance(name, str): + # callable, module, etc + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs) diff --git a/flagai/model/vision/layers/create_attn.py b/flagai/model/vision/layers/create_attn.py new file mode 100755 index 00000000..028c0f75 --- /dev/null +++ b/flagai/model/vision/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type is not None: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. + if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. 
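+            # ('sk' -> SelectiveKernel, 'splat' -> SplitAttn)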
+ elif attn_type == 'sk': + module_cls = SelectiveKernel + elif attn_type == 'splat': + module_cls = SplitAttn + + # Self-attention / attention-like modules w/ significant compute and/or params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'lambda': + return LambdaLayer + elif attn_type == 'bottleneck': + return BottleneckAttn + elif attn_type == 'halo': + return HaloAttn + elif attn_type == 'nl': + module_cls = NonLocalAttn + elif attn_type == 'bat': + module_cls = BatNonLocalAttn + + # Woops! + else: + assert False, "Invalid attn module (%s)" % attn_type + elif isinstance(attn_type, bool): + if attn_type: + module_cls = SEModule + else: + module_cls = attn_type + return module_cls + + +def create_attn(attn_type, channels, **kwargs): + module_cls = get_attn(attn_type) + if module_cls is not None: + # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels + return module_cls(channels, **kwargs) + return None diff --git a/flagai/model/vision/layers/create_conv2d.py b/flagai/model/vision/layers/create_conv2d.py new file mode 100755 index 00000000..ac9489ce --- /dev/null +++ b/flagai/model/vision/layers/create_conv2d.py @@ -0,0 +1,36 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + if 'groups' in kwargs: + groups = kwargs.pop('groups') + if groups == in_channels: + kwargs['depthwise'] = True + else: + assert groups == 1 + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 + groups = in_channels if depthwise else kwargs.pop('groups', 1) + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + return m diff --git a/flagai/model/vision/layers/create_norm_act.py b/flagai/model/vision/layers/create_norm_act.py new file mode 100755 index 00000000..cd15c2f8 --- /dev/null +++ b/flagai/model/vision/layers/create_norm_act.py @@ -0,0 +1,88 @@ +""" NormAct (Normalizaiton + Activation Layer) Factory + +Create norm + act combo modules that attempt to be backwards compatible with separate norm + act +isntances in models. Where these are used it will be possible to swap separate BN + act layers with +combined modules like IABN or EvoNorms. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +from .evo_norm import * +from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d +from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d +from .inplace_abn import InplaceAbn + +_NORM_ACT_MAP = dict( + batchnorm=BatchNormAct2d, + batchnorm2d=BatchNormAct2d, + groupnorm=GroupNormAct, + layernorm=LayerNormAct, + layernorm2d=LayerNormAct2d, + evonormb0=EvoNorm2dB0, + evonormb1=EvoNorm2dB1, + evonormb2=EvoNorm2dB2, + evonorms0=EvoNorm2dS0, + evonorms0a=EvoNorm2dS0a, + evonorms1=EvoNorm2dS1, + evonorms1a=EvoNorm2dS1a, + evonorms2=EvoNorm2dS2, + evonorms2a=EvoNorm2dS2a, + frn=FilterResponseNormAct2d, + frntlu=FilterResponseNormTlu2d, + inplaceabn=InplaceAbn, + iabn=InplaceAbn, +) +_NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} +# has act_layer arg to define act type +_NORM_ACT_REQUIRES_ARG = { + BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} + + +def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): + layer = get_norm_act_layer(layer_name, act_layer=act_layer) + layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def get_norm_act_layer(norm_layer, act_layer=None): + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + layer_name = norm_layer.replace('_', '').lower().split('-')[0] + norm_act_layer = _NORM_ACT_MAP.get(layer_name, None) + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + elif type_name.startswith('layernorm2d'): + norm_act_layer = LayerNormAct2d + elif type_name.startswith('layernorm'): + norm_act_layer = LayerNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. + # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/flagai/model/vision/layers/drop.py b/flagai/model/vision/layers/drop.py new file mode 100755 index 00000000..ae065277 --- /dev/null +++ b/flagai/model/vision/layers/drop.py @@ -0,0 +1,166 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. 
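# [Editor's note, not part of the original patch] Usage sketch for the norm+act factory in
# create_norm_act.py above, assuming 64-channel feature maps:
#   from torch import nn
#   layer_cls = get_norm_act_layer('batchnorm', act_layer=nn.ReLU)   # partial(BatchNormAct2d, ...)
#   bn_act = create_norm_act_layer('batchnorm', 64, act_layer=nn.ReLU)
#   evo = create_norm_act_layer('evonorms0', 64)                     # activation baked in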
+ +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def drop_block_2d( + x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, + with_noise: bool = False, inplace: bool = False, batchwise: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. + w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, + gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid + block mask at edges. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + block_mask = torch.empty_like(x).bernoulli_(gamma) + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.empty_like(x).normal_() + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. 
- block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + """ + + def __init__( + self, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, + batchwise: bool = False, + fast: bool = True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
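# [Editor's note, not part of the original patch] Typical stochastic-depth usage, sketched
# for a residual block with drop probability 0.1:
#   self.drop_path = DropPath(0.1) if drop_path_rate > 0. else nn.Identity()
#   x = x + self.drop_path(self.mlp(self.norm(x)))   # per-sample path drop, training only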
+ """ + def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) diff --git a/flagai/model/vision/layers/eca.py b/flagai/model/vision/layers/eca.py new file mode 100755 index 00000000..e29be6ac --- /dev/null +++ b/flagai/model/vision/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + def __init__( + self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', + rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): + super(EcaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + assert kernel_size % 2 == 1 + padding = (kernel_size - 1) // 2 + if use_mlp: + # NOTE 'mlp' mode is a timm experiment, not in paper + assert channels is not None + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) + act_layer = act_layer or nn.ReLU + self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) + self.act = create_act_layer(act_layer) + self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) + else: + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.act = None + self.conv2 = None + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv + y = self.conv(y) + if self.conv2 is not None: + y = self.act(y) + y = self.conv2(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +EfficientChannelAttn = EcaModule # alias + + +class CecaModule(nn.Module): + """Constructs a circular ECA module. + + ECA module where the conv uses circular padding rather than zero padding. + Unlike the spatial dimension, the channels do not have inherent ordering nor + locality. Although this module in essence, applies such an assumption, it is unnecessary + to limit the channels on either "edge" from being circularly adapted to each other. + This will fundamentally increase connectivity and possibly increase performance metrics + (accuracy, robustness), without significantly impacting resource metrics + (parameter size, throughput,latency, etc) + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + + def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): + super(CecaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + has_act = act_layer is not None + assert kernel_size % 2 == 1 + + # PyTorch circular padding mode is buggy as of pytorch 1.4 + # see https://github.com/pytorch/pytorch/pull/17240 + # implement manual circular padding + self.padding = (kernel_size - 1) // 2 + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) + # Manually implement circular padding, F.pad does not seemed to be bugged + y = F.pad(y, (self.padding, self.padding), mode='circular') + y = self.conv(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +CircularEfficientChannelAttn = CecaModule diff --git a/flagai/model/vision/layers/evo_norm.py b/flagai/model/vision/layers/evo_norm.py new file mode 100755 index 00000000..b643302c --- /dev/null +++ b/flagai/model/vision/layers/evo_norm.py @@ -0,0 +1,350 @@ +""" EvoNorm in PyTorch + +Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 +@inproceedings{NEURIPS2020, + author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, + pages = {13539--13550}, + publisher = {Curran Associates, Inc.}, + title = {Evolving Normalization-Activation Layers}, + url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, + volume = {33}, + year = {2020} +} + +An attempt at getting decent performing EvoNorms running in PyTorch. +While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm +in terms of memory usage and throughput on GPUs. + +I'm testing these modules on TPU w/ PyTorch XLA. Promising start but +currently working around some issues with builtin torch/tensor.var/std. Unlike +GPU, similar train speeds for EvoNormS variants and BatchNorm. 
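# [Editor's note, not part of the original patch] Worked example of the adaptive kernel-size
# rule used by EcaModule/CecaModule above, for channels=256, gamma=2, beta=1:
#   t = int(abs(log2(256) + 1) / 2) = int(4.5) = 4 -> even, so kernel_size = max(4 + 1, 3) = 5
#   eca = EcaModule(channels=256)                 # zero-padded 1d conv, kernel_size 5
#   ceca = CecaModule(channels=256)               # same, but with circular padding
#   out = eca(torch.randn(2, 256, 14, 14))        # gating keeps the input shape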
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Sequence, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def instance_std(x, eps: float = 1e-5): + std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) + return std.expand(x.shape) + + +def instance_std_tpu(x, eps: float = 1e-5): + std = manual_var(x, dim=(2, 3)).add(eps).sqrt() + return std.expand(x.shape) +# instance_std = instance_std_tpu + + +def instance_rms(x, eps: float = 1e-5): + rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) + return rms.expand(x.shape) + + +def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): + xm = x.mean(dim=dim, keepdim=True) + if diff_sqm: + # difference of squared mean and mean squared, faster on TPU can be less stable + var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) + else: + var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) + return var + + +def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): + B, C, H, W = x.shape + x_dtype = x.dtype + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + else: + x = x.reshape(B, groups, C // groups, H, W) + std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + return std.expand(x.shape).reshape(B, C, H, W) + + +def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): + # This is a workaround for some stability / odd behaviour of .var and .std + # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results + B, C, H, W = x.shape + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + var = manual_var(x, dim=-1, diff_sqm=diff_sqm) + else: + x = x.reshape(B, groups, C // groups, H, W) + var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) + return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) +#group_std = group_std_tpu # FIXME TPU temporary + + +def group_rms(x, groups: int = 32, eps: float = 1e-5): + B, C, H, W = x.shape + _assert(C % groups == 0, '') + x_dtype = x.dtype + x = x.reshape(B, groups, C // groups, H, W) + rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) + return rms.expand(x.shape).reshape(B, C, H, W) + + +class EvoNorm2dB0(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + # var = manual_var(x, dim=(0, 2, 3)).squeeze() + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach() * self.momentum * (n / (n - 1))) + else: + var = self.running_var + left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) + v = self.v.to(x_dtype).view(v_shape) + right = x * v + instance_std(x, self.eps) + x = x / left.max(right) + return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) + + +class EvoNorm2dB1(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = (x + 1) * instance_rms(x, self.eps) + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dB2(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + 
self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = instance_rms(x, self.eps) - x + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0(nn.Module): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0a(EvoNorm2dS0): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + d = group_std(x, self.groups, self.eps) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() + x = x / d + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.pre_act_norm = False + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_std(x, self.groups, 
self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1a(EvoNorm2dS1): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2a(EvoNorm2dS2): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) diff --git a/flagai/model/vision/layers/filter_response_norm.py b/flagai/model/vision/layers/filter_response_norm.py new file mode 100755 index 00000000..a66a1cd4 --- /dev/null +++ b/flagai/model/vision/layers/filter_response_norm.py @@ -0,0 +1,68 @@ +""" Filter Response Norm in PyTorch + +Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def inv_instance_rms(x, eps: float = 1e-5): + rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) + return rms.expand(x.shape) + + +class FilterResponseNormTlu2d(nn.Module): + def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): + super(FilterResponseNormTlu2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.tau is not None: + nn.init.zeros_(self.tau) + + def 
forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x + + +class FilterResponseNormAct2d(nn.Module): + def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): + super(FilterResponseNormAct2d, self).__init__() + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer, inplace=inplace) + else: + self.act = nn.Identity() + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return self.act(x) diff --git a/flagai/model/vision/layers/gather_excite.py b/flagai/model/vision/layers/gather_excite.py new file mode 100755 index 00000000..2d60dc96 --- /dev/null +++ b/flagai/model/vision/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. 
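# [Editor's note, not part of the original patch] Usage sketch for the Filter Response Norm
# modules above; both expect NCHW input and apply a per-channel affine after the inverse-RMS
# normalization:
#   frn_tlu = FilterResponseNormTlu2d(64)                    # FRN + learned-threshold TLU
#   frn_act = FilterResponseNormAct2d(64, act_layer=nn.ReLU)
#   y = frn_tlu(torch.randn(2, 64, 28, 28))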
+ +NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math + +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .create_conv2d import create_conv2d +from .helpers import make_divisible +from .mlp import ConvMlp + + +class GatherExcite(nn.Module): + """ Gather-Excite Attention Module + """ + def __init__( + self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, + rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): + super(GatherExcite, self).__init__() + self.add_maxpool = add_maxpool + act_layer = get_act_layer(act_layer) + self.extent = extent + if extra_params: + self.gather = nn.Sequential() + if extent == 0: + assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' + self.gather.add_module( + 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) + else: + assert extent % 2 == 0 + num_conv = int(math.log2(extent)) + for i in range(num_conv): + self.gather.add_module( + f'conv{i + 1}', + create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) + if i != num_conv - 1: + self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) + else: + self.gather = None + if self.extent == 0: + self.gk = 0 + self.gs = 0 + else: + assert extent % 2 == 0 + self.gk = self.extent * 2 - 1 + self.gs = self.extent + + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
+ self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + size = x.shape[-2:] + if self.gather is not None: + x_ge = self.gather(x) + else: + if self.extent == 0: + # global extent + x_ge = x.mean(dim=(2, 3), keepdims=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) + else: + x_ge = F.avg_pool2d( + x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) + x_ge = self.mlp(x_ge) + if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: + x_ge = F.interpolate(x_ge, size=size) + return x * self.gate(x_ge) diff --git a/flagai/model/vision/layers/global_context.py b/flagai/model/vision/layers/global_context.py new file mode 100755 index 00000000..de7fb5c1 --- /dev/null +++ b/flagai/model/vision/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
+ if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/flagai/model/vision/layers/halo_attn.py b/flagai/model/vision/layers/halo_attn.py new file mode 100755 index 00000000..f2ac64f8 --- /dev/null +++ b/flagai/model/vision/layers/halo_attn.py @@ -0,0 +1,233 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. 
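# [Editor's note, not part of the original patch] Usage sketch for the two attention blocks
# above. GatherExcite with extent=0 and no extra params reduces to an SE-style global gather;
# GlobalContext defaults to a softmax-pooled context used for rescaling:
#   ge = GatherExcite(64)                                     # parameter-free global gather
#   ge_conv = GatherExcite(64, extra_params=True, extent=4)   # two stride-2 depthwise convs
#   gc = GlobalContext(64)                                    # scale fusion only
#   gc_add = GlobalContext(64, fuse_add=True, fuse_scale=False)   # the 'gca' variant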
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) + stride: output stride of the module, query downscaled if > 1 (default: 1). 
+ num_heads: parallel attention heads (default: 8). + dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + block_size (int): size of blocks. (default: 8) + halo_size (int): size of halo overlap. (default: 3) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool) : add bias to q, k, and v projections + avg_down (bool): use average pool downsample instead of strided query blocks + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, + qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + assert stride in (1, 2) + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + self.block_size = self.block_size_ds = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.block_stride = 1 + use_avg_pool = False + if stride > 1: + use_avg_pool = avg_down or block_size % stride != 0 + self.block_stride = 1 if use_avg_pool else stride + self.block_size_ds = self.block_size // self.block_stride + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. + self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H % self.block_size == 0, '') + _assert(W % self.block_size == 0, '') + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + + q = self.q(x) + # unfold + q = q.reshape( + -1, self.dim_head_qk, + num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not + # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. + # FIXME figure out how to switch impl between this and conv2d if XLA being used. 
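# [Editor's note, not part of the original patch] Construction sketch for HaloAttn; the
# asserts above require input H and W to be multiples of block_size:
#   halo = HaloAttn(dim=128, num_heads=8, block_size=8, halo_size=3)
#   y = halo(torch.randn(2, 128, 32, 32))          # (2, 128, 32, 32) when stride == 1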
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + if self.scale_pos_embed: + attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale + else: + attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) + # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view( + B, self.dim_out_v, H // self.block_stride, W // self.block_stride) + # B, dim_out, H // block_stride, W // block_stride + out = self.pool(out) + return out + + +""" Three alternatives for overlapping windows. + +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if is_xla: + # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is + # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. + WW = self.win_size ** 2 + pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) + kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) + elif self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/flagai/model/vision/layers/helpers.py b/flagai/model/vision/layers/helpers.py new file mode 100755 index 00000000..cc54ca7f --- /dev/null +++ b/flagai/model/vision/layers/helpers.py @@ -0,0 +1,31 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
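# [Editor's note, not part of the original patch] Worked examples for the helpers above:
#   to_2tuple(3) -> (3, 3), while to_2tuple((3, 5)) passes through unchanged;
#   make_divisible(37) -> 40 (nearest multiple of 8), make_divisible(3) -> 8 (min_value floor),
#   make_divisible(10) -> 16, since rounding down to 8 would lose more than 10% of v and the
#   guard below adds one extra divisor.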
+ if new_v < round_limit * v: + new_v += divisor + return new_v diff --git a/flagai/model/vision/layers/inplace_abn.py b/flagai/model/vision/layers/inplace_abn.py new file mode 100755 index 00000000..a8088933 --- /dev/null +++ b/flagai/model/vision/layers/inplace_abn.py @@ -0,0 +1,87 @@ +import torch +from torch import nn as nn + +try: + from inplace_abn.functions import inplace_abn, inplace_abn_sync + has_iabn = True +except ImportError: + has_iabn = False + + def inplace_abn(x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): + raise ImportError( + "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") + + def inplace_abn_sync(**kwargs): + inplace_abn(**kwargs) + + +class InplaceAbn(nn.Module): + """Activated Batch Normalization + + This gathers a BatchNorm and an activation function in a single module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + act_layer : str or nn.Module type + Name or type of the activation functions, one of: `leaky_relu`, `elu` + act_param : float + Negative slope for the `leaky_relu` activation. + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_layer=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer is None or act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/flagai/model/vision/layers/lambda_layer.py b/flagai/model/vision/layers/lambda_layer.py new file mode 100755 index 00000000..e50b43c8 --- /dev/null +++ b/flagai/model/vision/layers/lambda_layer.py @@ -0,0 +1,133 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, 
+Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, +Year = {2021}, +} + +Status: +This impl is a WIP. Code snippets in the paper were used as reference but +good chance some details are missing/wrong. + +I've only implemented local lambda conv based pos embeddings. + +For a PyTorch impl that includes other embedding options checkout +https://github.com/lucidrains/lambda-networks + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ + + +def rel_pos_indices(size): + size = to_2tuple(size) + pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) + rel_pos = pos[:, None, :] - pos[:, :, None] + rel_pos[0] += size[0] - 1 + rel_pos[1] += size[1] - 1 + return rel_pos # 2, H * W, H * W + + +class LambdaLayer(nn.Module): + """Lambda Layer + + Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + + NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. + + The internal dimensions of the lambda module are controlled via the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query (q) and key (k) dimension are determined by + * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None + * q = num_heads * dim_head, k = dim_head + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W + stride (int): output stride of the module, avg pool used if stride == 2 + num_heads (int): parallel attention heads. + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, + qk_ratio=1.0, qkv_bias=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0, ' should be divided by num_heads' + self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.num_heads = num_heads + self.dim_v = dim_out // num_heads + + self.qkv = nn.Conv2d( + dim, + num_heads * self.dim_qk + self.dim_qk + self.dim_v, + kernel_size=1, bias=qkv_bias) + self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) + self.norm_v = nn.BatchNorm2d(self.dim_v) + + if r is not None: + # local lambda convolution for pos + self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) + self.pos_emb = None + self.rel_pos_indices = None + else: + # relative pos embedding + assert feat_size is not None + feat_size = to_2tuple(feat_size) + rel_size = [2 * s - 1 for s in feat_size] + self.conv_lambda = None + self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) + self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + if self.conv_lambda is not None: + trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) + if self.pos_emb is not None: + trunc_normal_(self.pos_emb, std=.02) + + def forward(self, x): + B, C, H, W = x.shape + M = H * W + qkv = self.qkv(x) + q, k, v = torch.split(qkv, [ + self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) + q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K + v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V + k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M + + content_lam = k @ v # B, K, V + content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V + + if self.pos_emb is None: + position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K + position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V + else: + # FIXME relative pos embedding path not fully verified + pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) + position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/flagai/model/vision/layers/linear.py b/flagai/model/vision/layers/linear.py new file mode 100755 index 00000000..38fe3380 --- /dev/null +++ b/flagai/model/vision/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. 
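# [Editor's note, not part of the original patch] Usage sketch for LambdaLayer above. With the
# default local lambda-conv position branch (r=9) no feat_size is required; the relative
# position-embedding variant needs it:
#   lam = LambdaLayer(dim=128, num_heads=4, r=9)
#   lam_rel = LambdaLayer(dim=128, num_heads=4, r=None, feat_size=(14, 14))
#   y = lam(torch.randn(2, 128, 14, 14))           # same spatial size when stride == 1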
+ """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/flagai/model/vision/layers/median_pool.py b/flagai/model/vision/layers/median_pool.py new file mode 100755 index 00000000..40bd71a7 --- /dev/null +++ b/flagai/model/vision/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. + + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/flagai/model/vision/layers/mixed_conv2d.py b/flagai/model/vision/layers/mixed_conv2d.py new file mode 100755 index 00000000..fa0ce565 --- /dev/null +++ b/flagai/model/vision/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + 
create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/flagai/model/vision/layers/ml_decoder.py b/flagai/model/vision/layers/ml_decoder.py new file mode 100755 index 00000000..3f828c6d --- /dev/null +++ b/flagai/model/vision/layers/ml_decoder.py @@ -0,0 +1,156 @@ +from typing import Optional + +import torch +from torch import nn +from torch import nn, Tensor +from torch.nn.modules.transformer import _get_activation_fn + + +def add_ml_decoder_head(model): + if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50 + model.global_pool = nn.Identity() + del model.fc + num_classes = model.num_classes + num_features = model.num_features + model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) + elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet + model.global_pool = nn.Identity() + del model.classifier + num_classes = model.num_classes + num_features = model.num_features + model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) + elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head') + del model.head + num_classes = model.num_classes + num_features = model.num_features + model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features) + else: + print("Model code-writing is not aligned currently with ml-decoder") + exit(-1) + if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout + model.drop_rate = 0 + return model + + +class TransformerDecoderLayerOptimal(nn.Module): + def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu", + layer_norm_eps=1e-5) -> None: + super(TransformerDecoderLayerOptimal, self).__init__() + self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.dropout = nn.Dropout(dropout) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) + + self.activation = _get_activation_fn(activation) + + def __setstate__(self, state): + if 'activation' not in state: + state['activation'] = torch.nn.functional.relu + super(TransformerDecoderLayerOptimal, self).__setstate__(state) + + def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: + tgt = tgt + self.dropout1(tgt) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(tgt, memory, memory)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + +# @torch.jit.script +# class ExtrapClasses(object): +# def __init__(self, num_queries: int, group_size: int): +# self.num_queries = num_queries +# self.group_size = 
group_size +# +# def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap: +# torch.Tensor): +# # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size) +# h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups]) +# w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size)) +# out = (h * w).sum(dim=2) + class_embed_b +# out = out.view((h.shape[0], self.group_size * self.num_queries)) +# return out + +@torch.jit.script +class GroupFC(object): + def __init__(self, embed_len_decoder: int): + self.embed_len_decoder = embed_len_decoder + + def __call__(self, h: torch.Tensor, duplicate_pooling: torch.Tensor, out_extrap: torch.Tensor): + for i in range(self.embed_len_decoder): + h_i = h[:, i, :] + w_i = duplicate_pooling[i, :, :] + out_extrap[:, i, :] = torch.matmul(h_i, w_i) + + +class MLDecoder(nn.Module): + def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): + super(MLDecoder, self).__init__() + embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups + if embed_len_decoder > num_classes: + embed_len_decoder = num_classes + + # switching to 768 initial embeddings + decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding + self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) + + # decoder + decoder_dropout = 0.1 + num_layers_decoder = 1 + dim_feedforward = 2048 + layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, + dim_feedforward=dim_feedforward, dropout=decoder_dropout) + self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) + + # non-learnable queries + self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) + self.query_embed.requires_grad_(False) + + # group fully-connected + self.num_classes = num_classes + self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) + self.duplicate_pooling = torch.nn.Parameter( + torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) + self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) + torch.nn.init.xavier_normal_(self.duplicate_pooling) + torch.nn.init.constant_(self.duplicate_pooling_bias, 0) + self.group_fc = GroupFC(embed_len_decoder) + + def forward(self, x): + if len(x.shape) == 4: # [bs,2048, 7,7] + embedding_spatial = x.flatten(2).transpose(1, 2) + else: # [bs, 197,468] + embedding_spatial = x + embedding_spatial_786 = self.embed_standart(embedding_spatial) + embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) + + bs = embedding_spatial_786.shape[0] + query_embed = self.query_embed.weight + # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) + tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand + h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] + h = h.transpose(0, 1) + + out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) + self.group_fc(h, self.duplicate_pooling, out_extrap) + h_out = out_extrap.flatten(1)[:, :self.num_classes] + h_out += self.duplicate_pooling_bias + logits = h_out + return logits diff --git a/flagai/model/vision/layers/mlp.py b/flagai/model/vision/layers/mlp.py new file mode 100755 index 00000000..91e80a84 --- /dev/null +++ b/flagai/model/vision/layers/mlp.py @@ -0,0 +1,126 @@ +""" MLP module w/ dropout and configurable activation layer + +Hacked together by / 
Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features // 2, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
+ else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, + norm_layer=None, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.drop = nn.Dropout(drop) + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/flagai/model/vision/layers/non_local_attn.py b/flagai/model/vision/layers/non_local_attn.py new file mode 100755 index 00000000..670e8f24 --- /dev/null +++ b/flagai/model/vision/layers/non_local_attn.py @@ -0,0 +1,145 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvNormAct +from .helpers import make_divisible +from .trace_utils import _assert + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. 
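+
+    Illustrative usage (a sketch; the channel count and feature-map size are arbitrary
+    assumptions)::
+
+        attn = NonLocalAttn(64)
+        out = attn(torch.randn(2, 64, 14, 14))   # output keeps the input shape, (2, 64, 14, 14)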
+ """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + _assert(block_size == block_size1, '') + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + _assert(x.shape[-1] % self.block_size == 0, '') + _assert(x.shape[-2] % self.block_size == 0, '') + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // 
self.groups, self.block_size, self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/flagai/model/vision/layers/norm.py b/flagai/model/vision/layers/norm.py new file mode 100755 index 00000000..85297420 --- /dev/null +++ b/flagai/model/vision/layers/norm.py @@ -0,0 +1,24 @@ +""" Normalization layers and wrappers +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + + def forward(self, x): + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class LayerNorm2d(nn.LayerNorm): + """ LayerNorm for channels of '2D' spatial BCHW tensors """ + def __init__(self, num_channels): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) diff --git a/flagai/model/vision/layers/norm_act.py b/flagai/model/vision/layers/norm_act.py new file mode 100755 index 00000000..34c4fd64 --- /dev/null +++ b/flagai/model/vision/layers/norm_act.py @@ -0,0 +1,151 @@ +""" Normalization + Activation Layers +""" +from typing import Union, List + +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .trace_utils import _assert +from .create_act import get_act_layer + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. 
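+
+    A minimal usage sketch (channel count and input shape are illustrative assumptions)::
+
+        bn_act = BatchNormAct2d(32, act_layer=nn.ReLU)
+        y = bn_act(torch.randn(4, 32, 8, 8))   # batch norm, then inplace ReLU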
+ """ + def __init__( + self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing + _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') + + # exponential_average_factor is set to self.momentum + # (when it is available) only so that it gets updated + # in ONNX graph when this node is exported to ONNX. + if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + # TODO: if statement only here to tell the jit to skip emitting this when it is None + if self.num_batches_tracked is not None: # type: ignore[has-type] + self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore[has-type] + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + r""" + Decide whether the mini-batch stats should be used for normalization rather than the buffers. + Mini-batch stats are used in training mode, and in eval mode when buffers are None. + """ + if self.training: + bn_training = True + else: + bn_training = (self.running_mean is None) and (self.running_var is None) + + r""" + Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be + passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are + used for normalization (i.e. in eval mode when buffers are not None). 
+ """ + x = F.batch_norm( + x, + # If buffers are not to be tracked, ensure that they won't be updated + self.running_mean if not self.training or self.track_running_stats else None, + self.running_var if not self.training or self.track_running_stats else None, + self.weight, + self.bias, + bn_training, + exponential_average_factor, + self.eps, + ) + x = self.drop(x) + x = self.act(x) + return x + + +def _num_groups(num_channels, num_groups, group_size): + if group_size: + assert num_channels % group_size == 0 + return num_channels // group_size + return num_groups + + +class GroupNormAct(nn.GroupNorm): + # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args + def __init__( + self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(GroupNormAct, self).__init__( + _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + x = self.drop(x) + x = self.act(x) + return x + + +class LayerNormAct(nn.LayerNorm): + def __init__( + self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + x = self.drop(x) + x = self.act(x) + return x + + +class LayerNormAct2d(nn.LayerNorm): + def __init__( + self, num_channels, eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) + x = self.drop(x) + x = self.act(x) + return x diff --git a/flagai/model/vision/layers/padding.py b/flagai/model/vision/layers/padding.py new file mode 100755 index 00000000..34afc37c --- /dev/null +++ b/flagai/model/vision/layers/padding.py @@ -0,0 +1,56 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple + +import torch.nn.functional as F + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate 
asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, k: int, s: int, d: int): + return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) + + +# Can SAME padding for given args be done statically? +def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/flagai/model/vision/layers/patch_embed.py b/flagai/model/vision/layers/patch_embed.py new file mode 100755 index 00000000..b074798b --- /dev/null +++ b/flagai/model/vision/layers/patch_embed.py @@ -0,0 +1,40 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. 
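+
+For example (an illustrative sketch; 224/16/768 are just the common ViT-Base defaults,
+not a requirement of this module):
+
+    patch_embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
+    tokens = patch_embed(torch.randn(1, 3, 224, 224))   # (1, 196, 768): 14 x 14 patches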
+ +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple +from .trace_utils import _assert + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/flagai/model/vision/layers/pool2d_same.py b/flagai/model/vision/layers/pool2d_same.py new file mode 100755 index 00000000..4c2a1c44 --- /dev/null +++ b/flagai/model/vision/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? 
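+    # Worked example of the 'SAME' padding arithmetic (illustrative numbers only): for
+    # input width 7, kernel 3, stride 2, dilation 1, get_same_padding returns
+    # max((ceil(7 / 2) - 1) * 2 + (3 - 1) * 1 + 1 - 7, 0) = 2, which pad_same splits as 1 / 1.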
+ x = pad_same(x, kernel_size, stride) + return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + +class AvgPool2dSame(nn.AvgPool2d): + """ Tensorflow like 'SAME' wrapper for 2D average pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride) + return F.avg_pool2d( + x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) + + +def max_pool2d_same( + x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + dilation: List[int] = (1, 1), ceil_mode: bool = False): + x = pad_same(x, kernel_size, stride, value=-float('inf')) + return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) + + +class MaxPool2dSame(nn.MaxPool2d): + """ Tensorflow like 'SAME' wrapper for 2D max pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) + return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) + + +def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): + stride = stride or kernel_size + padding = kwargs.pop('padding', '') + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) + if is_dynamic: + if pool_type == 'avg': + return AvgPool2dSame(kernel_size, stride=stride, **kwargs) + elif pool_type == 'max': + return MaxPool2dSame(kernel_size, stride=stride, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/flagai/model/vision/layers/pos_embed.py b/flagai/model/vision/layers/pos_embed.py new file mode 100755 index 00000000..99a122a0 --- /dev/null +++ b/flagai/model/vision/layers/pos_embed.py @@ -0,0 +1,207 @@ +import math +from typing import List, Tuple, Optional, Union + +import torch +from torch import nn as nn + + +def pixel_freq_bands( + num_bands: int, + max_freq: float = 224., + linear_bands: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +): + if linear_bands: + bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device) + else: + bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device) + return bands * torch.pi + + +def inv_freq_bands( + num_bands: int, + temperature: float = 100000., + step: int = 2, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +) -> torch.Tensor: + inv_freq = 1. 
/ (temperature ** (torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands)) + return inv_freq + + +def build_sincos2d_pos_embed( + feat_shape: List[int], + dim: int = 64, + temperature: float = 10000., + reverse_coord: bool = False, + interleave_sin_cos: bool = False, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None +) -> torch.Tensor: + """ + + Args: + feat_shape: + dim: + temperature: + reverse_coord: stack grid order W, H instead of H, W + interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos + dtype: + device: + + Returns: + + """ + assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding' + pos_dim = dim // 4 + bands = inv_freq_bands(pos_dim, temperature=temperature, step=1, dtype=dtype, device=device) + + if reverse_coord: + feat_shape = feat_shape[::-1] # stack W, H instead of H, W + grid = torch.stack( + torch.meshgrid([torch.arange(s, device=device, dtype=dtype) for s in feat_shape])).flatten(1).transpose(0, 1) + pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0) + # FIXME add support for unflattened spatial dim? + + stack_dim = 2 if interleave_sin_cos else 1 # stack sin, cos, sin, cos instead of sin sin cos cos + pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1) + return pos_emb + + +def build_fourier_pos_embed( + feat_shape: List[int], + bands: Optional[torch.Tensor] = None, + num_bands: int = 64, + max_res: int = 224, + linear_bands: bool = False, + include_grid: bool = False, + concat_out: bool = True, + in_pixels: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +) -> List[torch.Tensor]: + if bands is None: + if in_pixels: + bands = pixel_freq_bands(num_bands, float(max_res), linear_bands=linear_bands, dtype=dtype, device=device) + else: + bands = inv_freq_bands(num_bands, step=1, dtype=dtype, device=device) + else: + if device is None: + device = bands.device + if dtype is None: + dtype = bands.dtype + + if in_pixels: + grid = torch.stack(torch.meshgrid( + [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in feat_shape]), dim=-1) + else: + grid = torch.stack(torch.meshgrid( + [torch.arange(s, device=device, dtype=dtype) for s in feat_shape]), dim=-1) + grid = grid.unsqueeze(-1) + pos = grid * bands + + pos_sin, pos_cos = pos.sin(), pos.cos() + out = (grid, pos_sin, pos_cos) if include_grid else (pos_sin, pos_cos) + # FIXME torchscript doesn't like multiple return types, probably need to always cat? 
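+    # When concat_out is True, the sin and cos pieces (plus the grid, when include_grid is
+    # set) are concatenated along the last dimension into a single tensor; otherwise the
+    # tuple built above is returned unchanged.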
+ if concat_out: + out = torch.cat(out, dim=-1) + return out + + +class FourierEmbed(nn.Module): + + def __init__(self, max_res: int = 224, num_bands: int = 64, concat_grid=True, keep_spatial=False): + super().__init__() + self.max_res = max_res + self.num_bands = num_bands + self.concat_grid = concat_grid + self.keep_spatial = keep_spatial + self.register_buffer('bands', pixel_freq_bands(max_res, num_bands), persistent=False) + + def forward(self, x): + B, C = x.shape[:2] + feat_shape = x.shape[2:] + emb = build_fourier_pos_embed( + feat_shape, + self.bands, + include_grid=self.concat_grid, + dtype=x.dtype, + device=x.device) + emb = emb.transpose(-1, -2).flatten(len(feat_shape)) + batch_expand = (B,) + (-1,) * (x.ndim - 1) + + # FIXME support nD + if self.keep_spatial: + x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1) + else: + x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1) + x = x.reshape(B, feat_shape.numel(), -1) + + return x + + +def rot(x): + return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) + + +def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): + return x * cos_emb + rot(x) * sin_emb + + +def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): + if isinstance(x, torch.Tensor): + x = [x] + return [t * cos_emb + rot(t) * sin_emb for t in x] + + +def apply_rot_embed_split(x: torch.Tensor, emb): + split = emb.shape[-1] // 2 + return x * emb[:, :split] + rot(x) * emb[:, split:] + + +def build_rotary_pos_embed( + feat_shape: List[int], + bands: Optional[torch.Tensor] = None, + dim: int = 64, + max_freq: float = 224, + linear_bands: bool = False, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +): + """ + NOTE: shape arg should include spatial dim only + """ + feat_shape = torch.Size(feat_shape) + + sin_emb, cos_emb = build_fourier_pos_embed( + feat_shape, bands=bands, num_bands=dim // 4, max_res=max_freq, linear_bands=linear_bands, + concat_out=False, device=device, dtype=dtype) + N = feat_shape.numel() + sin_emb = sin_emb.reshape(N, -1).repeat_interleave(2, -1) + cos_emb = cos_emb.reshape(N, -1).repeat_interleave(2, -1) + return sin_emb, cos_emb + + +class RotaryEmbedding(nn.Module): + """ Rotary position embedding + + NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not + been well tested, and will likely change. It will be moved to its own file. 
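+
+    Illustrative token-level usage (a sketch; the dim and 8x8 grid are assumptions, and
+    `apply_rot_embed` is the module-level helper defined above)::
+
+        rope = RotaryEmbedding(dim=64)
+        sin_emb, cos_emb = rope.get_embed([8, 8])    # embeddings for an 8x8 feature grid
+        tokens = torch.randn(2, 64, 64)              # (batch, 8*8 tokens, dim)
+        tokens = apply_rot_embed(tokens, sin_emb, cos_emb)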
+ + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + def __init__(self, dim, max_res=224, linear_bands: bool = False): + super().__init__() + self.dim = dim + self.register_buffer('bands', pixel_freq_bands(dim // 4, max_res, linear_bands=linear_bands), persistent=False) + + def get_embed(self, shape: List[int]): + return build_rotary_pos_embed(shape, self.bands) + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + sin_emb, cos_emb = self.get_embed(x.shape[2:]) + return apply_rot_embed(x, sin_emb, cos_emb) diff --git a/flagai/model/vision/layers/selective_kernel.py b/flagai/model/vision/layers/selective_kernel.py new file mode 100755 index 00000000..3d71e3aa --- /dev/null +++ b/flagai/model/vision/layers/selective_kernel.py @@ -0,0 +1,119 @@ +""" Selective Kernel Convolution/Attention + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn + +from .conv_bn_act import ConvNormActAa +from .helpers import make_divisible +from .trace_utils import _assert + + +def _kernel_valid(k): + if isinstance(k, (list, tuple)): + for ki in k: + return _kernel_valid(ki) + assert k >= 3 and k % 2 + + +class SelectiveKernelAttn(nn.Module): + def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + """ Selective Kernel Attention Module + + Selective Kernel attention mechanism factored out into its own module. + + """ + super(SelectiveKernelAttn, self).__init__() + self.num_paths = num_paths + self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) + self.bn = norm_layer(attn_channels) + self.act = act_layer(inplace=True) + self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) + + def forward(self, x): + _assert(x.shape[1] == self.num_paths, '') + x = x.sum(1).mean((2, 3), keepdim=True) + x = self.fc_reduce(x) + x = self.bn(x) + x = self.act(x) + x = self.fc_select(x) + B, C, H, W = x.shape + x = x.view(B, self.num_paths, C // self.num_paths, H, W) + x = torch.softmax(x, dim=1) + return x + + +class SelectiveKernel(nn.Module): + + def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, + rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): + """ Selective Kernel Convolution Module + + As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. + + Largest change is the input split, which divides the input channels across each convolution path, this can + be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps + the parameter count from ballooning when the convolutions themselves don't have groups, but still provides + a noteworthy increase in performance over similar param count models without this attention layer. 
-Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + aa_layer (nn.Module): anti-aliasing module + drop_layer (nn.Module): spatial drop module in convs (drop block, etc) + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer, drop_layer=drop_layer) + self.paths = nn.ModuleList([ + ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/flagai/model/vision/layers/separable_conv.py b/flagai/model/vision/layers/separable_conv.py new file mode 100755 index 00000000..c081e02b --- /dev/null +++ b/flagai/model/vision/layers/separable_conv.py @@ -0,0 +1,76 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
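+
+Illustrative usage (a sketch; channel counts and input size are arbitrary assumptions):
+
+    conv = SeparableConv2d(32, 64, kernel_size=3, stride=2)
+    y = conv(torch.randn(1, 32, 56, 56))   # depthwise 3x3 stride 2, then pointwise 1x1 -> (1, 64, 28, 28)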
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer + + +class SeparableConvNormAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_layer=None): + super(SeparableConvNormAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + x = self.bn(x) + return x + + +SeparableConvBnAct = SeparableConvNormAct + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/flagai/model/vision/layers/space_to_depth.py b/flagai/model/vision/layers/space_to_depth.py new file mode 100755 index 00000000..a7e8e0b2 --- /dev/null +++ b/flagai/model/vision/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def __init__(self, no_jit=False): + super().__init__() + if not no_jit: + self.op = SpaceToDepthJit() + else: + self.op = SpaceToDepth() + + def forward(self, x): + return self.op(x) + + +class 
DepthToSpace(nn.Module): + + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x diff --git a/flagai/model/vision/layers/split_attn.py b/flagai/model/vision/layers/split_attn.py new file mode 100755 index 00000000..ac54f898 --- /dev/null +++ b/flagai/model/vision/layers/split_attn.py @@ -0,0 +1,84 @@ +""" Split Attention Conv2d (for ResNeSt Models) + +Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt + +Modified for torchscript compat, performance, and consistency with timm by Ross Wightman +""" +import torch +import torch.nn.functional as F +from torch import nn + +from .helpers import make_divisible + + +class RadixSoftmax(nn.Module): + def __init__(self, radix, cardinality): + super(RadixSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttn(nn.Module): + """Split-Attention (aka Splat) + """ + def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, + dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): + super(SplitAttn, self).__init__() + out_channels = out_channels or in_channels + self.radix = radix + mid_chs = out_channels * radix + if rd_channels is None: + attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) + else: + attn_chs = rd_channels * radix + + padding = kernel_size // 2 if padding is None else padding + self.conv = nn.Conv2d( + in_channels, mid_chs, kernel_size, stride, padding, dilation, + groups=groups * radix, bias=bias, **kwargs) + self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + self.act0 = act_layer(inplace=True) + self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) + self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() + self.act1 = act_layer(inplace=True) + self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) + self.rsoftmax = RadixSoftmax(radix, groups) + + def forward(self, x): + x = self.conv(x) + x = self.bn0(x) + x = self.drop(x) + x = self.act0(x) + + B, RC, H, W = x.shape + if self.radix > 1: + x = x.reshape((B, self.radix, RC // self.radix, H, W)) + x_gap = x.sum(dim=1) + else: + x_gap = x + x_gap = x_gap.mean((2, 3), keepdim=True) + x_gap = self.fc1(x_gap) + x_gap = self.bn1(x_gap) + x_gap = self.act1(x_gap) + x_attn = self.fc2(x_gap) + + x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) + if self.radix > 1: + out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) + else: + out = x * x_attn + return out.contiguous() diff --git a/flagai/model/vision/layers/split_batchnorm.py b/flagai/model/vision/layers/split_batchnorm.py new file mode 100755 index 00000000..830781b3 --- /dev/null +++ 
b/flagai/model/vision/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. + +This allows easily removing the auxiliary BN layers after training to efficiently +achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, +'Disentangled Learning via An Auxiliary BN' + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn + + +class SplitBatchNorm2d(torch.nn.BatchNorm2d): + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, num_splits=2): + super().__init__(num_features, eps, momentum, affine, track_running_stats) + assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' + self.num_splits = num_splits + self.aux_bn = nn.ModuleList([ + nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) + + def forward(self, input: torch.Tensor): + if self.training: # aux BN only relevant while training + split_size = input.shape[0] // self.num_splits + assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" + split_input = input.split(split_size) + x = [super().forward(split_input[0])] + for i, a in enumerate(self.aux_bn): + x.append(a(split_input[i + 1])) + return torch.cat(x, dim=0) + else: + return super().forward(input) + + +def convert_splitbn_model(module, num_splits=2): + """ + Recursively traverse module and its children to replace all instances of + ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. + Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/flagai/model/vision/layers/squeeze_excite.py b/flagai/model/vision/layers/squeeze_excite.py new file mode 100755 index 00000000..e5da29ef --- /dev/null +++ b/flagai/model/vision/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. 
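+
+A minimal usage sketch (the channel count below is an illustrative assumption):
+
+    se = SEModule(64)
+    y = se(torch.randn(2, 64, 16, 16))   # channel-recalibrated output, same shape as input
+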
+Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). +Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/flagai/model/vision/layers/std_conv.py b/flagai/model/vision/layers/std_conv.py new file mode 100755 index 00000000..d896ba5c --- /dev/null +++ b/flagai/model/vision/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross 
Wightman, 2021. +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
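+
+    Illustrative usage (a sketch; channel counts and input size are assumptions)::
+
+        conv = ScaledStdConv2d(16, 32, kernel_size=3)
+        y = conv(torch.randn(1, 16, 32, 32))   # standardized weights, output (1, 32, 32, 32)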
+ """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/flagai/model/vision/layers/test_time_pool.py b/flagai/model/vision/layers/test_time_pool.py new file mode 100755 index 00000000..98c0bf53 --- /dev/null +++ b/flagai/model/vision/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return 
x.view(x.size(0), -1) + + +def apply_test_time_pool(model, config, use_test_size=True): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/flagai/model/vision/layers/trace_utils.py b/flagai/model/vision/layers/trace_utils.py new file mode 100755 index 00000000..83970729 --- /dev/null +++ b/flagai/model/vision/layers/trace_utils.py @@ -0,0 +1,13 @@ +try: + from torch import _assert +except ImportError: + def _assert(condition: bool, message: str): + assert condition, message + + +def _float_to_int(x: float) -> int: + """ + Symbolic tracing helper to substitute for inbuilt `int`. + Hint: Inbuilt `int` can't accept an argument of type `Proxy` + """ + return int(x) diff --git a/flagai/model/vision/layers/weight_init.py b/flagai/model/vision/layers/weight_init.py new file mode 100755 index 00000000..305a2fd0 --- /dev/null +++ b/flagai/model/vision/layers/weight_init.py @@ -0,0 +1,89 @@ +import torch +import math +import warnings + +from torch.nn.init import _calculate_fan_in_and_fan_out + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. 
+ Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + if mode == 'fan_in': + denom = fan_in + elif mode == 'fan_out': + denom = fan_out + elif mode == 'fan_avg': + denom = (fan_in + fan_out) / 2 + + variance = scale / denom + + if distribution == "truncated_normal": + # constant is stddev of standard normal truncated to (-2, 2) + trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) + elif distribution == "normal": + tensor.normal_(std=math.sqrt(variance)) + elif distribution == "uniform": + bound = math.sqrt(3 * variance) + tensor.uniform_(-bound, bound) + else: + raise ValueError(f"invalid distribution {distribution}") + + +def lecun_normal_(tensor): + variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') diff --git a/flagai/model/vision/vit.py b/flagai/model/vision/vit.py new file mode 100644 index 00000000..d2fe1ef5 --- /dev/null +++ b/flagai/model/vision/vit.py @@ -0,0 +1,505 @@ +""" +# Copyright © 2022 BAAI. All rights reserved. +""" + +""" +Vision Transformer (ViT) in PyTorch +A PyTorch implement of Vision Transformers as described in: +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 +The official jax code is released and available at https://github.com/google-research/vision_transformer +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert +Hacked together by / Copyright 2020, Ross Wightman +""" + +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint +from typing import Callable + +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) +IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) +IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) + +from flagai.model.vision.layers.patch_embed import PatchEmbed +from flagai.model.vision.layers.mlp import Mlp +from flagai.model.vision.layers.drop import DropPath +from flagai.model.vision.layers.weight_init import trunc_normal_, lecun_normal_ +from flagai.model.base_model import BaseModel + +class VitConfig: + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + init_values=None, + class_token=True, + fc_norm=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + weight_init='', + checkpoint_activations=None): + pass + self.img_size=img_size + self.patch_size=patch_size + self.in_chans=in_chans + self.num_classes=num_classes + self.global_pool=global_pool + self.embed_dim=embed_dim + self.depth=depth + self.num_heads=num_heads + self.mlp_ratio=mlp_ratio + self.qkv_bias=qkv_bias + self.init_values=init_values + self.class_token=class_token + self.fc_norm=fc_norm + self.drop_rate=drop_rate + self.attn_drop_rate=attn_drop_rate + self.drop_path_rate=drop_path_rate + self.weight_init=weight_init + self.checkpoint_activations = checkpoint_activations + + +def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. 
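            # Illustrative walk-through (example shapes only): adapting a pretrained RGB kernel of
            # shape (64, 3, 7, 7) to in_chans=4 gives repeat = ceil(4 / 3) = 2, so the kernel is
            # tiled to (64, 6, 7, 7), sliced back to (64, 4, 7, 7), and then rescaled by 3/4 so the
            # expected activation magnitude stays roughly comparable to the original 3-channel case.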
+ repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class Block(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None, + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + +class VisionTransformer(BaseModel): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + """ + + def __init__( + self, config, num_classes=1000): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + global_pool (str): type of global pooling for final sequence (default: 'token') + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + init_values: (float): layer-scale init values + class_token (bool): use class token + fc_norm (Optional[bool]): pre-fc norm after pool, set if global_pool == 'avg' if None (default: None) + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + weight_init (str): weight init scheme + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + act_layer: (nn.Module): MLP activation layer + """ + super().__init__(config) + embed_layer=PatchEmbed + block_fn=Block + vit_config = VitConfig(**config) + vit_config.num_classes = num_classes + config = vit_config + + assert config.global_pool in ('', 'avg', 'token') + assert config.class_token or config.global_pool != 'token' + use_fc_norm = config.global_pool == 'avg' if config.fc_norm is None else config.fc_norm + norm_layer = partial(nn.LayerNorm, eps=1e-6) + act_layer = nn.GELU + + self.num_classes = num_classes + self.global_pool = config.global_pool + self.num_features = self.embed_dim = config.embed_dim # num_features for consistency with other models + self.num_tokens = 1 if config.class_token else 0 + self.grad_checkpointing = False + + self.patch_embed = embed_layer( + img_size=config.img_size, patch_size=config.patch_size, in_chans=config.in_chans, embed_dim=config.embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if self.num_tokens > 0 else None + self.pos_embed = nn.Parameter(torch.randn(1, num_patches + self.num_tokens, config.embed_dim) * .02) + self.pos_drop = nn.Dropout(p=config.drop_rate) + + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + block_fn( + dim=config.embed_dim, num_heads=config.num_heads, mlp_ratio=config.mlp_ratio, qkv_bias=config.qkv_bias, init_values=config.init_values, + drop=config.drop_rate, attn_drop=config.attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(config.depth)]) + self.norm = norm_layer(config.embed_dim) if not use_fc_norm else nn.Identity() + + # Classifier Head + self.fc_norm = norm_layer(config.embed_dim) if use_fc_norm else nn.Identity() + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if config.weight_init != 'skip': + self.init_weights(config.weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'moco', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in 
mode else 0. + trunc_normal_(self.pos_embed, std=.02) + if self.cls_token is not None: + nn.init.normal_(self.cls_token, std=1e-6) + named_apply(get_init_weights_vit(mode, head_bias), self) + + def _init_weights(self, m): + # this fn left here for compat with downstream users + init_weights_vit_timm(m) + + @torch.jit.ignore() + def load_weights(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'dist_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes: int, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'token') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.pos_drop(x + self.pos_embed) + # if self.grad_checkpointing and not torch.jit.is_scripting(): + # x = checkpoint_seq(self.blocks, x) + # else: + x = self.blocks(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + return x if pre_logits else self.head(x) + + def compute_loss(self, logits, labels): + loss_func = nn.CrossEntropyLoss() + return loss_func(logits, labels) + + def forward(self, images=None, labels=None, **kwargs): + + x = self.forward_features(images) + x = self.forward_head(x) + loss = None + if labels is not None: + loss = self.compute_loss(x, labels) + return_data = {"logits": x, "hidden_states": x, "loss": loss} + + return return_data + + +def init_weights_vit_timm(module: nn.Module, name: str = ''): + """ ViT weight initialization, original timm impl (for reproducibility) """ + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.): + """ ViT weight initialization, matching JAX (Flax) impl """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_moco(module: nn.Module, name: str = ''): + """ ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """ + if isinstance(module, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. 
/ float(module.weight.shape[0] // 3 + module.weight.shape[1])) + nn.init.uniform_(module.weight, -val, val) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_init_weights_vit(mode='jax', head_bias: float = 0.): + if 'jax' in mode: + return partial(init_weights_vit_jax, head_bias=head_bias) + elif 'moco' in mode: + return init_weights_vit_moco + else: + return init_weights_vit_timm + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights + # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + # 
model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + +def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + elif 'pre_logits' in k: + # NOTE representation layer removed as not used in latest 21k/1k pretrained weights + continue + out_dict[k] = v + return out_dict + + + + diff --git a/flagai/trainer.py b/flagai/trainer.py index 911fe653..ae5aeefd 100644 --- a/flagai/trainer.py +++ b/flagai/trainer.py @@ -198,22 +198,31 @@ def __init__( self.not_call_launch = True self.deepspeed_config = deepspeed_config self.model_parallel_size = model_parallel_size - if 'deepspeed' in env_type or env_type == 'pytorchDDP': + self.num_nodes = num_nodes + self.num_gpus = num_gpus + self.master_ip = master_ip + 
self.master_port = master_port + self.hostfile = hostfile + self.training_script = training_script + + training_paras = self.get_dist_args() + + if 'deepspeed' in self.env_type or self.env_type == 'pytorchDDP': # Implement for AutoLaunch # >>> python train.py # will call get_dist_args() # `--not_call_launch` is default 'False' # So, if `env_type` is `pytorch`, the `Trainer` will not call lanch_dist() # Otherwise, the lanch_dist() is called to launch 'train.py' with `--not_call_launch` - self.get_dist_args() if not self.not_call_launch: launch_dist(launcher='distributed_deepspeed' if 'deepspeed' in env_type else 'distributed_torch', - num_nodes=num_nodes, - gpus_per_node=num_gpus, - master_addr=master_ip, - master_port=master_port, - hostfile=hostfile, - training_script=training_script) + num_nodes=self.num_nodes, + gpus_per_node=self.num_gpus, + master_addr=self.master_ip, + master_port=self.master_port, + hostfile=self.hostfile, + training_script=self.training_script, + training_paras=training_paras) os._exit(1) self.initialize_distributed() @@ -239,6 +248,7 @@ def get_dist_args(self): self.master_addr = os.environ.get('MASTER_ADDR', '127.0.0.1') self.master_port = os.environ.get('MASTER_PORT', '17500') log_dist("not_call_launch: {}".format(ds_args.not_call_launch)) + return [] def set_seed(self, seed=1234): """Set random seed for reproducability.""" @@ -513,7 +523,8 @@ def train(self, lr_scheduler, single_step=True) dist.barrier() - total_lm_loss += lm_loss.data.detach().float() + if lm_loss is not None: + total_lm_loss += lm_loss.data.detach().float() # Logging. if (self.iteration + 1) % self.log_interval == 0: @@ -1032,3 +1043,173 @@ def evaluate_and_print_results( log_dist(string, [0]) log_dist('-' * length, [0]) return eval_dict + + +class BatchTrainer(Trainer): + def __init__(self): + super(BatchTrainer, self).__init__() + + def get_dist_args(self): + parser = argparse.ArgumentParser() + parser.add_argument('--local_rank', + type=int, + default=0, + help="local_rank") + parser.add_argument('--env_type', + type=str, + required=True, + help="env_type: pytorch, pytorchDDP, deepspeed, deepspeed+mpu") + parser.add_argument('--not_call_launch', + action='store_true', + help="not call launch!") + parser.add_argument('--experiment_name', + type=str, + default="test", + help="experiment_name") + parser.add_argument('--batch_size', + type=int, + default=1, + help="batch size") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="gradient_accumulation_steps") + parser.add_argument('--lr', + type=float, + default=1e-5, + help="learning rate") + parser.add_argument('--weight_decay', + type=float, + default=1e-3, + help="weight_decay") + parser.add_argument('--epochs', + type=int, + default=2, + help="epochs") + parser.add_argument('--fp16', + type=bool, + default=False, + help="fp16") + parser.add_argument('--log_interval', + type=int, + default=10, + help="log_interval") + + parser.add_argument('--eval_interval', + type=int, + default=1000, + help="eval_interval") + parser.add_argument('--load_dir', + type=str, + default=None, + help="load_dir") + parser.add_argument('--save_dir', + type=str, + default="./checkpoints", + help="save_dir") + parser.add_argument('--save_interval', + type=int, + default=1000, + help="save_interval") + parser.add_argument('--num_checkpoints', + type=int, + default=1, + help="num_checkpoints") + parser.add_argument('--pytorch_device', + type=str, + default="cpu", + help="pytorch_device") + parser.add_argument('--num_nodes', + 
type=int, + default=1, + help="num_nodes") + parser.add_argument('--num_gpus', + type=int, + default=1, + help="num_gpus") + parser.add_argument('--deepspeed_config', + type=str, + default="./deepspeed.json", + help="deepspeed_config") + parser.add_argument('--hostfile', + type=str, + default="hostfile", + help="hostfile") + parser.add_argument('--model_parallel_size', + type=int, + default=1, + help="model_parallel_size") + parser.add_argument('--training_script', + type=str, + default="train.py", + help="training_script") + parser.add_argument('--master_ip', + type=str, + default="127.0.0.1", + help="master_ip") + parser.add_argument('--master_port', + type=int, + default=17500, + help="master_ip") + + ds_args = parser.parse_args() + self.local_rank = ds_args.local_rank + self.not_call_launch = ds_args.not_call_launch + self.rank = int(os.environ.get('RANK', 0)) + self.world_size = int(os.environ.get('WORLD_SIZE', 1)) + self.master_addr = ds_args.master_ip + self.master_port = ds_args.master_port + self.env_type = ds_args.env_type + self.experiment_name = ds_args.experiment_name + self.batch_size = ds_args.batch_size + self.gradient_accumulation_steps = ds_args.gradient_accumulation_steps + self.lr = ds_args.lr + self.weight_decay = ds_args.weight_decay + self.epochs = ds_args.epochs + self.fp16 = ds_args.fp16 + self.log_interval = ds_args.log_interval + self.eval_interval = ds_args.eval_interval + self.load_dir = ds_args.load_dir + self.save_dir = ds_args.save_dir + self.save_interval = ds_args.save_interval + self.num_checkpoints = ds_args.num_checkpoints + self.pytorch_device = ds_args.pytorch_device + self.num_nodes = ds_args.num_nodes + self.num_gpus = ds_args.num_gpus + self.deepspeed_config = ds_args.deepspeed_config + self.hostfile = ds_args.hostfile + self.model_parallel_size = ds_args.model_parallel_size + self.training_script = ds_args.training_script + + log_dist("not_call_launch: {}".format(ds_args.not_call_launch)) + + return [ + "--env_type", + self.env_type, + "--experiment_name", + self.experiment_name, + "--batch_size", + str(self.batch_size), + "--gradient_accumulation_steps", + str(self.gradient_accumulation_steps), + "--lr", + str(self.lr), + "--weight_decay", + str(self.weight_decay), + "--epochs", + str(self.epochs), + "--log_interval", + str(self.log_interval), + "--eval_interval", + str(self.eval_interval), + "--load_dir", + str(self.load_dir), + "--save_dir", + str(self.save_dir), + "--save_interval", + str(self.save_interval), + "--num_checkpoints", + str(self.num_checkpoints), + "--fp16", + str(self.fp16) + ] \ No newline at end of file diff --git a/flagai/utils.py b/flagai/utils.py index cb130a81..06c30026 100644 --- a/flagai/utils.py +++ b/flagai/utils.py @@ -206,8 +206,7 @@ def save_checkpoint(iteration, sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states() if env_type == 'pytorch' or (env_type != 'deepspeed+mpu' and dist.get_rank() == 0) or ( - env_type == 'deepspeed+mpu' - and mpu.get_data_parallel_group() == 0): + env_type == 'deepspeed+mpu'and mpu.get_model_parallel_src_rank() == 0): ensure_directory_exists(checkpoint_name) config_path = os.path.join(save_dir, str(iteration), 'config.json') @@ -220,6 +219,7 @@ def save_checkpoint(iteration, tracker_filename = get_checkpoint_tracker_filename(save_dir) with open(tracker_filename, 'w') as f: f.write(str(iteration) + '\t' + str(best_iteration)) + # Wait so everyone is done (necessary) if barrier and dist.is_initialized(): torch.distributed.barrier() diff --git 
a/vit-cifar100/events.out.tfevents.1657856025.deepspeed.4989.0 b/vit-cifar100/events.out.tfevents.1657856025.deepspeed.4989.0
new file mode 100644
GIT binary patch
[GIT binary patch literals for a series of vit-cifar100/events.out.tfevents.* TensorBoard log files; the unreadable binary payloads are not reproduced here. The full file list appears in the diffstat of PATCH 11/21 below, which deletes them again.]

From 81c438d839ab1f6c647b1fb3dc7319ba6c243489 Mon Sep 17 00:00:00 2001
From: zhaohu xing <920232796@qq.com>
Date: Fri, 15 Jul 2022 17:58:07 +0800
Subject: [PATCH 11/21] vit and examples

---
 ...ents.out.tfevents.1657856025.deepspeed.4989.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856036.deepspeed.4996.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856036.deepspeed.4997.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856036.deepspeed.4998.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856036.deepspeed.4999.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856036.deepspeed.5001.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856036.deepspeed.5003.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856037.deepspeed.5000.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657856037.deepspeed.5002.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860119.deepspeed.5045.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5052.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5053.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5054.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5055.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5056.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5057.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5058.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860131.deepspeed.5059.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657860187.deepspeed.5102.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862289.deepspeed.5186.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862349.deepspeed.5266.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862360.deepspeed.5273.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862360.deepspeed.5274.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862360.deepspeed.5276.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862360.deepspeed.5277.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862361.deepspeed.5275.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862361.deepspeed.5278.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862361.deepspeed.5279.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862361.deepspeed.5280.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862493.deepspeed.5347.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5354.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5355.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5356.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5357.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5358.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5359.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862504.deepspeed.5360.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862505.deepspeed.5361.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862544.deepspeed.5395.0 | Bin 40 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5402.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5403.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5404.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5405.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5406.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5407.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5408.0 | Bin 4880 -> 0 bytes
 ...ents.out.tfevents.1657862555.deepspeed.5409.0 | Bin 4880 -> 0 bytes
 47 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856025.deepspeed.4989.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4996.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4997.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4998.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.4999.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.5001.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856036.deepspeed.5003.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856037.deepspeed.5000.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657856037.deepspeed.5002.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860119.deepspeed.5045.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5052.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5053.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5054.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5055.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5056.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5057.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5058.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860131.deepspeed.5059.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657860187.deepspeed.5102.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862289.deepspeed.5186.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862349.deepspeed.5266.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5273.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5274.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5276.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862360.deepspeed.5277.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5275.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5278.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5279.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862361.deepspeed.5280.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862493.deepspeed.5347.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5354.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5355.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5356.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5357.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5358.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5359.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862504.deepspeed.5360.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862505.deepspeed.5361.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862544.deepspeed.5395.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5402.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5403.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5404.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5405.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5406.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5407.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5408.0
 delete mode 100644 vit-cifar100/events.out.tfevents.1657862555.deepspeed.5409.0

[47 GIT binary patch hunks deleting the vit-cifar100/events.out.tfevents.* files listed above (deleted file mode 100644); base85-encoded binary data omitted]
From da2462820dede0bb0e43a19b18f77ad81608ecc7 Mon Sep 17 00:00:00 2001
From: Zac Liu
Date: Fri, 15 Jul 2022 18:09:06 +0800
Subject: [PATCH 12/21] Update base_model.py

remove unused glob
---
 flagai/model/base_model.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/flagai/model/base_model.py b/flagai/model/base_model.py
index 9ea2d594..c600cfb4 100644
--- a/flagai/model/base_model.py
+++ b/flagai/model/base_model.py
@@ -8,7 +8,6 @@
 from typing import Union
 from flagai.model.file_utils import _get_model_id, _get_config_path, _get_checkpoint_path, _get_vocab_path, _get_model_files
 import os
-from glob import glob
 
 # The base model for models
 class BaseModel(Module):

From aff728bd337c50eea68b9e00093891b09f0f8112 Mon Sep 17 00:00:00 2001
From: Zac Liu
Date: Fri, 15 Jul 2022 18:20:42 +0800
Subject: [PATCH 13/21] Update vit.py

remove data statistics
---
 flagai/model/vision/vit.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/flagai/model/vision/vit.py b/flagai/model/vision/vit.py
index d2fe1ef5..44479a1e 100644
--- a/flagai/model/vision/vit.py
+++ b/flagai/model/vision/vit.py
@@ -28,15 +28,6 @@
 import torch.nn.functional as F
 import torch.utils.checkpoint
 from typing import Callable
-
-DEFAULT_CROP_PCT = 0.875
-IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
-IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
-IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
-IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
-IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255)
-IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3)
-
 from flagai.model.vision.layers.patch_embed import PatchEmbed
 from flagai.model.vision.layers.mlp import Mlp
 from flagai.model.vision.layers.drop import DropPath

From e5a0ddb800c78713713b1a5a9f3135f584ece311 Mon Sep 17 00:00:00 2001
From: zhaohu xing <920232796@qq.com>
Date: Fri, 15 Jul 2022 18:29:33 +0800
Subject: [PATCH 14/21] modify readme.md

Signed-off-by: zhaohu xing <920232796@qq.com>
---
 README.md         | 3 +--
 README_zh.md      | 2 +-
 flagai_wechat.png | Bin 68218 -> 149096 bytes
 setup.py          | 2 +-
 4 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 1d2990b0..37b40dac 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ FlagAI (Fast LArge-scale General AI models) is an fast, easy-to-use and extensib
 
 * FlagAI is backed by the three most popular data/model parallel libraries — [PyTorch](https://pytorch.org/)/[Deepspeed](https://www.deepspeed.ai/)/[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) — with seamless integration between them. Users can parallel their training/testing process with less than ten lines of code.
 
-The code is partially based on [GLM](https://github.com/THUDM/GLM), [Transformers](https://github.com/huggingface/transformers) and [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM).
+The code is partially based on [GLM](https://github.com/THUDM/GLM), [Transformers](https://github.com/huggingface/transformers), [timm](https://github.com/rwightman/pytorch-image-models) and [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM).
@@ -156,7 +156,6 @@ start with our [contributor guidelines](CONTRIBUTING.md) and then check these
 [open issues](https://github.com/BAAI-Open/FlagAI/issues) for specific tasks.
 
 ## Contact us
-Scan wechat QR code

diff --git a/README_zh.md b/README_zh.md
index 88def64d..e8dc5a3f 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -18,7 +18,7 @@
 
 * 飞智由三个最流行的数据/模型并行库([PyTorch](https://pytorch.org/)/[Deepspeed](https://www.deepspeed.ai/)/[Megatron-LM](https://github.com/NVIDIA/Megatron-LM))提供支持,它们之间实现了无缝集成。 你可以用不到十行代码来并行你的训练/测试过程。
 
-本项目的部分代码基于[GLM](https://github.com/THUDM/GLM),[Transformers](https://github.com/huggingface/transformers) 和 [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM).
+本项目的部分代码基于 [GLM](https://github.com/THUDM/GLM),[Transformers](https://github.com/huggingface/transformers),[timm](https://github.com/rwightman/pytorch-image-models) 和 [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM).

diff --git a/flagai_wechat.png b/flagai_wechat.png
index 387bded2058c20f57a3c8746715d24eb96a0145f..2788958a756db03326a5cea796f2f5d9b63e7bbf 100644
GIT binary patch
literal 149096
[... base85-encoded binary image data ...]
z?aFdY(I!NW0qgvj!iTRsHH4mVE_52vXEn)l6p&O$zzQ$`g z>f6aT+w3`>6FAy<|Jf{R#1X=|k--i0=k8PnL;m0hU$Z3fi{({E)2U~5Yvf;uKl7bO z_|kHweJGMEGXk~CTDO0^!ru$8v@Z|!w>$;jiOJ2&Qh1=|W|doBdn%0{Uvq|5+#Ie= z)YUg4fO>Ic@SHcYodmQ80^Z(Iv7(-Xm$10z^G&ISuJL({ha4+_UQ#63&`-+?X!`clx2Zac{Ja-BMbj{q_y@CpCqdRb5QLoS=A&{z8i zTM`I_u3BH(R?DP4*pT2V`~WxzK^^>12R~^;`V1X2=!r5ZvaVc~ZKGWETh0TpMEel* zLxSL#%5rlbp)38uycF3Mmd*Z4eRH|gl{U;%_$mFsI$i(Nc4d882C!Goqpn&XcICKY z9zU#?Ww3>Lxt&zN8hQ|QJ3<#WFpemfV+=4EHe?-9vFPub8v*MT`e9;waTOQ$<3zun_k*ReKJfr+yD5(ez1v)VqWACSBV7 z@JHuGNS5gmksUbUyZ}K@_ldo7jgqD=c?zKUZjM-O0I;l~qIF0XT57xGC==w0be~=H2?`wTf*0rUU;UY1e`F_3F{Fix1 zHmBOk&-o6V%w zgIxhC28Y9`%g%Jc&rR1rYWIR+;;iW8l+b73CZYmVg`IU zF|F2D6qv6`ZZ@Ja$agwTsJlXV5FZ*n?EsN=1cY z1hVhpibf-kBLKj1**;^mirEzQ=2JEivu0i0GOixa^e9btw+*y>?yJ-HqznatzRRuBtz0_;DEMhPx|iT?zKnoQ$$H2i5m40(`Zjc6kcxm?Fl}>b^K9 zIE7{KteORpw3k~N2YZt%?%vn8#M0jQO#0vM1iHJS6$pgCZ+9q1UCtb%(`m;0-^>omfmVn zUfWi!KrfowB$S@W=Jl0I&o&CR@b1cW>zRt-Sx(OOZ(=4r&w7$R0TINHY&j*>4AaR> z1<{)$hQHzn;q3|O9JOn__W~cTd0)pefM;ws^SYqyn=D;mGk9@tU3c8jzFa^q>i4#(i%_DIt?)0vGc(#UY*F|aQ=gW9e-=+H6Q z$f@`rnUUa`1k^xp0RGqh{rd}$4WGEu7hPzJ|IcnnKNfZL}^$U zb8z!`sA8_CuERVyjXQSiC_qTGhlFFWBk3e0s0qKY9h)|7N}x|bIc$Tc7=4xEu05$I zr>Z{2)}2TljP<~Q1L^eX(}FSq`vCZdA#!_3U7bzYu2~1$0Q)K^9Kdf3o6VXv6Z=Hl zwrve#4NhJu(jKN54~)ymkt4;pq795OxRGHSk<$RwG2YPA?IY4|(e&yc0q4IpwH-$c*5A;;X4Nx;H0y8|O=2 zC5>0@)11WYM^50T3fd0kPu$J&?-An-uQnpksA4ve=jDHoUZ6&|CeXcuyy>ygxd$}! z+^`1~`CLf%GT@7GyO(J)mTt(R`Ky@*VpA6BlHX-ANf;uRbppDEr*6~p9qRPWWi9LK zBU#LPx@DZyUmO=er{gZr688N{%c$-?H03r#9vk;rcYc*&p6YL(2%_=tBoY8tcT%fj zacWRSfBM?Y;Oj{~M)FGlpf28$Nkcp8K@gO?0(`*{1kw*y3gfh8?Jy|sbik@;)HH~d>@kvvdtp7tRoQQ z#YuA1NYL!ocgGsBqre%GK z)2zfp(C7R?E{Xqh1^l2G~H9n7*Q+h9A~{(b~&zfvsizBQU-n4 zx+3jL$e=sdN0RYMD{JvrSK+0oT7VZ#Z^HMY9BDlNWDeZ_kb?DF`2aGcVlhO~=Lz1%h-g_@6 z@)2MvemX%ivDG0N{=B{_w*ObNY-if$hG1`_c_J+>m2hZW~u)lcStZGw`!ZmoA3=31#q6 zZZ9-t*JrGgdS)GrVV^#Is9U#g0+huvfLin&FCnlFbJBPSPVS8otu{wzZB*Q1t5rXu=$|< z_T>QjVqqqIzQ8`>(BJnu>es0x_32oWj-I+euYBf+K-zcw0!N-+ATuq(|K?{HczMy= z+D!4FlYfsGZ;KF}Y!>)_B?L+A(v8m?xpo7TTII^>`|iqEohrq10D6FCH?{Q=zc=Qj zF^97C!6!YRAF5T~*6q%sPT9%h)N=FuhJUlDJcIh+1Xcyf+;$(ef;*~CLC@ns?YU9@ zH0Fl91N;6QH9z~2Qgr_ZN%Z?h*E`0|ZLJRzvX8&4vp?3PPHv|QHm2JGzI{1y-1Bqn zDLr!A7W5G{tdo}=Kv$SV~p2W zO+c#PWzKbjmr!9!>kw)bVt;LabE|_N6-^{@qOcBGINesL&4M8mE;ok9ncBzdV!csp)wq@aE9m7KRqA6{%6N|BsA z4V>SvG}UiAs6Wp9yRao?2?NS)mmCtjx^@6)Wd{EN_DTVr!muhq)URS7h5x!w6bI;{ zUSx%Qo+4^^LUs8il+pOF?W-CKY?j7uK0))N5^dHN6}uV%JdXq9l0Pg1G{euGK=;t0 zL-fTLU*zzo0w@O9wrJ5JTDEMNpe{fZEkNX(Zn{ZCsUlKU6-0glG{zJ*%a<=t4?g&y z&{g&1JU~wTRFNPa>O>-*HiMo)u4Zt3L@DIQr5_KIscu)X{ z09eP38%Gfl5n_DQ_Asv-C+P=(T{(~G*|TRwAK?dlL$bO1@4sIpGDG4qtcPHXx_0eK zD^{$al`B^Y8))bH>#r9+mMK$)@RI`DM~@yA3Cci60MHW=&*R6B7xj@a4dsaV9Xxoj zh*||`3IF66Nn80jRr|hS!v-9H#Z$0e1_+&7!IP3ci zn)$dNb!ut_eLxZM&0;H{)vmFAQ|>WE_?n}IF&LRw=sEtzNlA1z!FfZc-7t>po3hHN z&*v}NH`jTAP!wIxwy3KguU$;3G3rL^0|%146_otFGm9Sez<3-}h&=mgGX3~MDM2ZM zDoYs{eQ;J1rNBSSSb1PHA5;PusQo{taOm2^d84f}yL@XVP5C;RK6#)N)vl}`e*hS> zCIr$`vy)`KGjQY0pp(R_b$EB>Fo2d^EW236p z@rh12PjQ?yQWbB_cZO<&uc})#fAx<5?)PWh5=0|fJFgkKUiE3{9z@jDvnyM?C#?wZ zwHq9z{JwU9%93+7I{7?3@JkH+dDwU%0avdlfG+^z)|IX2Jbcz&u~cz7!RH1$ah34R zT&I3$NvdDY^?BS}$J6CO^#h6kUrz@sAN4v_az5f52kg|>9jE)fK~wFtwQ2>x!AD$^NB=S ziily8oKENSoQwP1&gJyi#ebCO-VmFwt&E{DriSylIJr&;h5K2b<8osT-K`(U`6@L1 zuB3@>FT+s)4Ow4^sZI<>V@GkKy8n`3f3pLzbRd~JHir)%rcXcplwxCJ1$6)tWg&WY zBYZEh1S_$IdU*RYe8yWI5^r zy#4aaFU9Bmi0nng>(HS?g?}G^{IT!{U?`%GTefUTzy0q z)o-+6URKKhyh@*-7at!l#v>{!O1M|4QYE_Mjyq`2o;{-PCr_Rf)?@DF|8wQCmv)W*IdfoDYJnt}h^x`95xzMRVXx#=4K-@SYH67~g9gy7{w zOiYZ3j#gkiyxqu$IPJFt660u_T9@cnyR7Txno3hBVd8{1lMD$eoE`hjD{2SIPm 
zv)q*ed@C~VafuRcZnLaIZ5aBJCb|p5N*4<)>l#6OsDhAxBPU>c`gSW=5_#6)CwQ9^ zlJ!0ZF)e{kbo$Xu!60SFLCzMNelZ8c&E#*rbX?2O-+TN#J@P>^z4t%>&6=M^uYY%C zA0Wa*i_xbXm0PQl>r=@W%hG7dmz>Fz`*7>@1e(M24RvflUwlqHV_YDW=JEXCH*5b| z)+|BcA=d4O@maqwi=yM5V~eu<5O=f=!~|7p3n-(}Ej;P(B{s7+RXT*U^I;1=n(sWlu+-PC4s!7%>{ae|a>-p3F{iBZn>rvf@aznu|q@j^_4?gCXbgJS*q0IApCe?UPl3(_V z?C3|JcQq>r2Jq3$;p?M$d58pDmqYqj3!oR;gxEHf#-BDkxH?+9{>gvN(4xMTbNF3V zC4ksp_Y9-{zn>Jmzv{Z&%#W-UL^nrpK8oxxXdV59wAUv*fU0T|&h3uRpcH(^{~wXB z4<}H^P(K<{)&8?~-B29j#AlCgKBZgbwyZO^r<3K;&YS^Au%?rkO8`Ret5KHjs#cc1 zwQpBe-QSBoZC=@KwWU1Ny9BN3RKrq^g7tB!)a#!k z+I81bp#;igpCo7|r{;3GEP*a8Ll=M}_)}%VGC4)GBobZ8q*s_qaLn8WY^J272*4P2 zk!(!1BkRazOaZ2PD&Sv*ZGd_30Y1S7fIj)dI`jkO@E`gR^b0z<71%QS2tUv^0C9-) z!#qxsZx`)i9)dBD;|L$IEXPGC@E>&L*sD{tBm0hW)Q6bsi$XVlvTZkNmeueLpg#J5 zAGR;X2qOK+i(j&?v?FZ@!p_LsKu+Go{%(frjj0ucQ8*tq*-zzj~`@C)eROc+=?Tlj-(8-Uj~O4oyl@&G6#1ZJ+UhsZu#6 zSg&r~l!s1UpiC1+q)SM4r6?VKenTG(bvZU>OHG9DUIxcg73vVKgN%um|J$tE6$rw zv~!-X4Oe0zlSsE`_1Gaf(<))a;QI*^1F^+{lOU3)4P>5(oNdn`;*Z)Mo@o|8r!XiaxKs}a7t znC_!u8PCo;I`6}nLZpICg1Ie6q}|-@hdbD(BFWtd^l9Wn({EueKuzMn+*vBpE~pU* z;Fm9U;Hr+hwtCqTbb|+U4)FH~(?8hBJ*@-jtF6ftpKg571zh9FE}nn=f(WvK3?AFb zJO3n56jM^TlL@U#QI*oxdmy~XySc7EeY{x@PP?;Pv_AJE5YBWS4K)-A2RM-*(?1&o zpTC<0pWPRUESs!|)X)w*;J#piK=I)&=bU(1$xeOBVR9t*$y@iiuTaqgmbON#+ltTc_qD*qLLKo}cH|F6_ZUpV>1npo9&<Per`CYAyCq7AfV2BIAnJo~rno36I13*Y26fInE4ewh2B zv!yFTJ+%$!U>W7org^>24$9yUwhhKlj+0JT*QQRdsO;(;0eiIN>g`j3dfu|0e#qAD za6AY-&dJG&nZCWjf%NJZ#!op_CwCn@M-6H??bc#`+3@nkL_)Y>*BimDi9?wl;w9(J z3+(8kdr`jYkP#S087L{ta-|}C&3;_2<#J`Nqsry17rG65&QTo()OEBM;8vD0{a%0S z%-}nIgS&duGaskWPNss;NjSIj{v!d@tBvD^w{|$u*xugs*>6T*a^v1})SRQo?|MCn zR&%ttJGs(eFZ=^gt$9YwaUHzJ4>BL`A`Ks}3i{TwNxlX{NX}-KUzww~Tu?E(p}7}% zD-qs^Z2fJ6aa_%HFJ<|!J(-k{a)E*r&~3-jEc))Rbeg{|oiE6jV%<>}^m0Sm>!W|S zAGPimpwlo5BKxKM-{KU)&sMC}4XQmGadgQ&R?z4)e?s2>bw}bAy3X02p|V_X(}q;@ zbULP^9|4F~C7qwv-db#Yk)p28&-w4xr_%n@)@|W-926%uAF~G@6g3Lu?fcSzAR5$M zAGxIJs`JSCIlae+SkE14GU zw!eO<`bG2l5g64hfL0uIj$tLHVR-N65Ykby=q&6!ZNI+&WL`MBJdOMDEbTq((5oC& zoIYav4a$24w4lu{5k6EdzTe)zEmnP8Z#a$~zb&HH)t% z!ndF)9qN?!qAxfC*IV;Ni}VkP+CJKqAQ!dMX$%PWiwK|~o9N97oU{(0Z$Aco&wv&{ zk^38#q5fRAK^dlDGu^u@YKB?6$mc#)0;qP7{W14I!?JWkWq+Eu<`l&-;+}zA>NXOnT{G zW7PWSs->w*NWpz&VdKg*TkI()rVfBw2NVI|-vw9(AhS-LIs)7qG-wdjuU}t8$O7bp z9;OhrK9)m6Luu;Nse<+Z%PM!Os*8Db8MK*~(o#xfumjd^m&sWr}j&;Zm zM=~$c*^AVpbqVU(+WPkR_cd9xP8e}$_M9-VP8)6rjw=rSRWE~QKK1dJ~O*#kPoGC_7N+I>9C3R4YZ>&_b5b{Agtx2b}@*0b{;O~s$x0F&*-aGNmD3k z9d2yqML#j<{M}z^v|EW*4dQ6zOO>0Nc~fX9tAAMx;QhJF`XHohGs}L+FV(#(ULW^n zkM|9*fRt(AK3~S4cU^|VXZ8L}!p|b^fNxaDpj9vb^K_8lS07qHyrs~DZK`wxHlWE$=K<>i5snmz->7;tu zlC<*9FagXWlD2d%Y)gAX8rTC3ix2Q;iq2aASw%ZtxG%*P@NH7wn`*MJovZrLwC=7y zcdimtl4kY_6>aQ28%aCQYK}Rr2UkZSP6bc3ElYu3#`AqDgKzI{jB0p+dB3awH-NMy_6Tty|EGvUm8hKsm_aJ>vBHSB-r)K2Wdl24<}~PjlUhE z5)v%njoGn+FFn~bM5@?M>y`GV#l5P~n>%Ca=B39;yr0eGb-~tnAzFcm>4D*aG_I!g zg|VHRw^b`mXEM&yws-}|wo^8h+wu3R(Y=j=yh#_~8=dalA53OFS}bJ(e9!WMvnw&3 znv~YRyob@lshRY%DH&TMrhzB|d_5X+w@UcTUUKgOXalDzl8FI$lzRW8@#Dt@ zs0dvEW0fmc7NBU`wrxc@0AGNl(2t0S5cW_8pbP*XY(zt$)a0l)&BM4jQohYNpE z4#5<@A(u!FHhvN+7c9>x%UVv1yOa-5)tXxIoIKk)WK7vqF-%;N_e zQZSEgcITaU8p>fCz$N&9A*hG(L%G?8v}K+`5A+M*2!?+U*aLW-8<7Aswh{bA5<2(+ zu3Y@&{)9ea`%B4{Tp-Z;YEdMrM2JGWG@30q6@q zP*3g`*jKR+!Z!8|_$K#n)ke|$wKD?NCzETZZ~5Mn2Ar%dKm+Q4zZZ#?ri{1V>>|?Y z^)FrDyc~%-Pg^*`Qw5W6Y*&(=elL|4t<9p|9gGkBi1MBIs^c^Ft}RMXQw<1>By!7{ zen1s}zW2~Yueyuyy>CeFZidx13*&q{rcThwdIo%N>!k-mjxwb~Snzf9%IoWAsc%j4r+!Z)55IpMa_9vB z1drvi6WP4SuKE+tNytVpy~Ec`@6bR=R5tUMRC@hKBb9^MP6F@aUoKAZ=YnUe%#1lZ&@22=rzm(oTR*TK|MrQS1N1mOR`B%Z6mky z)yfsFOGFKhnAb(5uG*V%S-{@hj^Cx~f4C}{-XCeb!2?ywJIj;l%|DXJpQ!{Mzafxr 
zZ|BeZg!5(@ZG#IM3EVtQ;)d(hx0_PwtrdxMieK6kN$y7A-{>qY;0r627ziHK+#XbQ z*U~F1;+8&SmLOH{K= zNekGE@%i(3TE2k2J*xOp3kFrqlEjgv%Z?ehr$675tq{wy72aMcs}^xp*PQQ41wdXO77>T?wDlD^?|`|?QIoE_!7D=w364ELl3r%o0> zq|5p!9bYeq9^~j1rmO%qEl$@u|!%JnM8>m(mUK(L4R&t!M3E_TuIL- zygd&9v$Tn6kcrPLhC%C%acM3AU$wrg`8cMzSamjq=^?CIO@e$a(Z$gEJI2{(wbteY zYfcPx{k1f|di83NNDQDB08u2;diddo1>l8#;+YI!8w7ycYp=aVM~@yAz!*TWx8HtS zBtv`k)mH`33P2A@(I!loK)?L*3(cQDKZlR^_S)tA)3smsv8e)j$M z-&15{q}VoHyLJ_0jl^`=X7B?*DiY+488b#ym$tDEe*F9Izrqgy`sizuCQZbCwtV?= z`uXRdbGEPCj__65Q2jB_W1q!7gD7i&+OUlm7~{u}7s>RNELkEreFFjlL>Uqf<|Rtn z+-}iw;T(a&^)z2N$0*0E&dv41wT1!T`)~CjaOs-m`=yz5B+BvN9H;?{rT)(=GYx<* zB5|*8Q<7%?nqdKadodVXy;3nc9C?8zYZ8pX=Hj)PWCn8Y7@!9;uM~%G?*|g;OuX^N zQ;tW@)N+$!)aUa&t@&8bnU-V2G}BeH9RcX3@mG$`HQ;#`M?fEszCizbf+Gtx+wi5& zQ%ELtGc_f+c@Hz7e`^opgI8USXdU0rhu;6$^~(UWy~4Cyy+UzLyjGlP4mi4!0oq3f z`!hf91$ugRGWqlVi***UY)^;A3tuM-&=--3vpK;VxOmO=ac{Qoq&|dmKS$ZS6VN4$ z?CRv&GV3QYfb32(LPPtZ^BJ}bSB{fm0>WqwEuNt8dlnRuIN%a0j*ZVS83P$tEsO)X%Hj)YRUmyxP2!dd;dDUw%nje-tp~0J!+vKD(Os3F(_* zs?pIs$jLejcBE6-!bEz$uk#c6GfC%Z;^YjfDVgqUmH*uLeQW!X zugxdjNgO@7>S*q+Q=nkDueEcZRr?FT_k&GEJig62+F2({j~KyNosPRQoMxVFtB;^w zcqo~v1o&E~oQv*ltiOk{qb_jQzTJA^SL0_V)o^t2S8yO1ExEZAPC3kheA-Mej~jlVt? zZ?2lF0(!AawKl+y#!?iz~WXyKdJ9o4GMY+R8>h8vIFeaRWr zPwFejxYocmT)1!FJ^=^<^nw5o1-Pb)1O=f3kaYk4{bDshQUIxlg8b>HpK=n6A-WPa zwr}4q%B3wtv9@mAT7YXSR;&=kVPTxEl=b`o06+jqL_t(6vVVW6BBm0ggN;Ln4$;De z3v<>(f7GZ^Im_w{08;>q>({Rr#fVBpv@W7SfBp4W0eWHul5zpq1Aw@A@!}j4O`A3q zK(=fjD`em0yj+KJIiF)7moumjI{>Ny?jqV60=k2qJ$nkE6@V=SeE`*ioEI}L{t&4O z`@44S65Cg*cJADnx^(Fx7u`+)x*tA#m=-KpAans5BNAHn-P3V6c<`XujuO-cfDKBG z0|yQe096P8RZvr?BF7)?ojJpaM|nKtI3cmx=+UDI`|$6-|DKbu4&&6XUq1nA!K%J`2 za=9MU=A4vl^~a?sIgK-O@~5WY1TL%jR2;gA4A@->!A)?nVs6@YbHk$2U3k)mTH(cP z`Fn>jT>}6|vuxt%>*vS%QPW!1!(}DYFnqrpr18WNz*g`Lwq|^tCOCXco1!=GALL65 z8JIuD(Y?75KYo_^`sq?s(#*MMavjmEI_by}&I_jLAI7ErpW8F(weMX=1KiWskML6k z3EaMa+IR!1uJ>eqOiy3JPbULjh^MWGU4MSpmy^0Z!^z|j0i4L6qXUpeRCNzjn zGs0Y{6xh0tSEo=VPMmjV#{l~4Kn6`>u=h-|b6>5-$^TkMcu^asX$aQ50PxhYmP{S- zLch`knuve-w=en}os2(Eu|<>)o(>`Xb1Z{CUY|^NwQ?l_f6%MYf}107tSi`B-T zT^2{tDd%Z&=O9}UN!5Rk6Roug=i|7N6#xSTg_NNU?ZJqp?4yT4cG8^S+_eLI!np62dGSi%?1k8eS+0vtz4)_kM6F~LK>R3f|k5V(C89JA6!Nj+?jog4xD_o-C+>r@)8WjQgkiX<3?7-}oeG^ ziw@5ZzW^rRR54KeU@wuQT{py}2~J*c{T^U&b8liMftR-=oHNOmxb&O^aMK!Emkxof&<{YB|Dn-5OeiDygiDx^{i#E)uF5(&1+M|PzFkeBDu5? zkRTYQ028Gka#4b8vP_*qUkU(USs(KNQXw+Z6jRui02%D!2VE)9QQK4PLq}bQWoaAp zX1mZ;*P|V|44@Jb$`JSjUDY<`p#xFNMS=3CuEV_9U%UDkHF+FBN392c)G@(&h`L?i zn@-1}Y5kXeVu~@4AiiqTTFY4i|78CF`bvQ<{8ayCTT(|iMIAZ5SdV4YQ|qeR3p(mL zEStwbj)(LKIuP_#_7}@$J18@Yd0eDz^b_`B4`rU%GTSa%zPd)hd3L(G{J!ko26ALx zv)b01;zK8J05S&)e@0z0Kt1i@#78u)KWF%45TzzZJK7~zLuaC6FAARD#tNv|xfugg zY;(?%40`TPBe2%HLrJQ^;H6H^#9yF2oJ6coNBs@Ixz5q-2xN14K`;OMmivd|{Pv-H z3oBrRHbBWRkwJ8wG-9C8-`BWprQVK%=V?$^FY43I%N9_NXRg}U=A_ZI@x})oAO4K? 
zr91s;^fbrYPifQL)YqH$#ajA)xT}lx0LdFhxIXlw`;(lnxr!3bOM*k_IuFXTIJ@@6o-T~Bs!QwdqT zd6c6s0j$3g{m5;bQx_!CN~Wzi9K*>S8JMjfR)QK-EJ-bRJ~YsJUCOO}=vU?>S@`v+ zV>}Py1;D99=BejIeNjAJcO;X(WKQ71EO;>zq&a7&<}np}3V#N#6P>*&OXF!MG;p-e*_uKx{SimQn&^R6=-A23wTZOxWCqRXRhId1^*8DVVzOxZs(8WsyYHcJ zt*PB-9Fb2rr#5nt+$}eB4zi)G3}&BTKI6(7K4a*Fa-um;1rH`^3Qw%3^$NRQGfjl= zHr^)DoFLEe1f7SDjA@|1UPLo@Ex3>e)YR{H=BB_^n_u$P^Ir$NMUNOWl{9=O_j*k#I0)9&En|W!%0~J{0_*Da6iRkv?4N<{y3RJfATaw z`&jH##h+$(4WqW@IUBtPHzJ!kq1ruvonm^9i}d1_7+MvTMzcDUr}8#Hbc+yQTG6{I zy}b1-eeiF5p8Id^V}35r?Od6^>3~SbYSpR9%E1{7 z+YqcrU!*?#0F478d!d75W$+PtjT$u){$aU#_3EMwytlP$*A{+22mGwkmh=TvwGFjA zH}mGBS8WIFA|V?hrBRMCK_W3EltX{u8^#F!dMdDkA1E`hZICPtV-K!c*z;5{j_|K; z-MTq`U>kTMa@^ntwj=f(^aFliKf}uf_=9Z#pP++2>qL$L`hx9?WhB#sE!1h;xUty2 zpmBhI60p_TGMCA=aU39u9d#pk|HOU{4rJ^*NJxmtX32r9+K|T+`dK7b{|LB7eXjog zrTIJ*E!m-`#g2KCy5>!=|jVB@XFBxEyLXdjStT{hX4#i{@9R3D&UI< z&ZZ19|F}GhCXePQMFt4ZGMGDOaVCAeD1(Of^r9vTK{GjhbM+(m0gbC5MR@V zq?*Acsd7+p0`RJPA3^%`^DyD`eKWHgrTLq?1Hr%N!RjeCDP zYpiZsZ(PYs?$%o>6y5@FzL%^YY<wU0=#H?BL#iVZscgH zqkoN-9uQ~1ARD?Ioliw&mo@?toa)%Iqe#Go#9RPX0dU#zlLB0O^wCE}!ZoxGKn!3Y zK)fkarU+J;(g zH;=Y+QwMypQ>RW9;{@;#U?Y44XD^b?LEt0$=c&L4*a9am099-QEMt3M%&}gI&R^+2 z#tHt-nl(#|5&XbQ0=Zt=&`qHu+X0malJ)^Og-vWz*unM%=!@~kJ_9gbC-5EH65A2r zHTG4E5%xcD>Q-X_S=xpTP)116^-r$D6zyW1!AFg8fQ0!N^_l18`2Q;PC+|NXam(}Tl3>zgaP|IglefXPuDeZS?` z1cPld29YF^5JBV&CWA2;u*vpk11~vaa(ofV~qL2IX{0 zfP@TR|7y2pd(zJC?a9_1)z33GJ>At6{yn-o{jawAKRrkN-A#O{i)5SVvssr~Ec<_x zqbGjnj<{l`yW`A7eOZi3R?;(X&31ckmHwD)QumjBJ&m71|M%BfuE(W6_dlH4`7#>c zx$At%g!^BZw;x4M`tw709_H&YPxcv~OXZX>$-?06!E@a3Ix=IEI=-mkfV$+GTApuv z(Vrqq#{QT7P{*9|3Zb4CKKRCcG_rO=?!95f|Dg}{)1Ni_JNNpa#D9vDLi6BxofQ|5 zL&%b*u3O%y?-L2S_J?-of33ukk*&nUB-`C^)nJ2!wZoYJ;$^nySyMtdC<0kt*a<)W!dgLs3*?W`S z`J1HeJuz&$yWpM59~|!T2fh`xI|enm!#geJ+WSMvsv(=_uZ)}Jp8j%%`^}2k*|K>)|8w*VcjpO1ECF>@B?-)A8t>|m>E}D_aP5Fl}Jn?$s8rkn7zIGctIWzkZz&;#yDEOKX_ahm!L*@@n9OS zekZkgf9^|Bgh52bC|*`HXNk){34r*qvIx6E>vAMM;;+s3nQ?tJPS_o?5X zz0RKDZa#HUxAl4pxDP&`>rTCCmRsLv>xLw75=TmTyxPC=i~EPZ{6lxIZ*gB+_ZnZ~ zVT%7Ffwg&qz`H}fb-Qg;8%aj3?aLo;>?7)V8NM9YGSQRIiKkW7< zH)7(v$!HGaPxiCw_m1_YGzNMyJp4~&a$ReGe)5B_-EQkF?Dp(Z+aK9R`oq0m`1ou0 zyg$h9-H~%*2LV0jiQnh^IG;$~)E|Jin--bfVX5L@1^C7A%zrewj8CVwc8FVz z|Eu{d>)})9#`ZMN`JGn#i94uE{zryCo{;{XB2Sl%w=4o#@p?uH$fiPT-x$CF#WG|8aJbKG=KJpO#4_|Kb2(09i9vItkdJ0%=yk^Zd%n%rUw z{>c4#!}x#V-x&RsJM`tTzO+Ki{!Y}*+go+91>Nu0sFb*P!_H%OP%5YpY< z(w)E8bMLw5-0!+)t>0h6z=Ac*JMZV&&wlo^_ma-{$AVZm+Bx+t9{-{mcGCW!2A}LU z56=8pCJ5stJB zO~!c?(Lt5hRo-paJOwHE3{|NlzIfZ-2C*Q}gI7eF!TJDyGNrJHH9h;^90wVJe3!NzBw+I$b zXR70*MycE7fn5TAHbdw7a{}$B#FLK| z*|uufq0tCr5ZwynT6~*bJv+s>*LL)G3*nAJrv0Q13H#NX7#AjD78^ubclZ+DpX!*# z^=N6TO(2irQ@Q^W>eH*rekNr$f2S_mYB9Z@m=_dE$G|Jl!%sn=v>Y_-`B_l=lH93( z^SnyF^Y!0VzIUBEJ_-lJP+E_>NurzOa{02EW_Ez_Ox)$VH!6ALz9X93gX{;NQ>@mL zK6*u-m>45DeAF+^csL+Ex*>+RE+kadsM5 zs6q*+8?xyR)2rtZ64u3^A=#C=0DHQG^X+qC)Yrg2KHI-vJI8HDTsB;~$J*Hb`vzxZncVDR@fYR9N48zRx$(mT%mAD08PVZciE?`cubm#LNUO6n0jt zTDk;sH0iho>>90Fyygb+t~3i*Qibdn)E+KJVysg(phPTFsrNn6Efr}X&pI_a_1&X~ zozcXDh(^YB*Jd_}%3?u6R<7?y8|CFy*6AC)mvyabE}69>C{cII(hqBt=MQ_bpA!#z z-O|PIWbU`(K^^aY9QM_Y}$FgsTaH|ISt8?&GGB|=p^sd zf}B!qrqOt(?^!&=bx=vpHb2gbt#5I1A|g=HG|L>Kmn)Rj>iGREVduux;*o^0$m5Xy z^s1aQET(urrx31+5StQ8dP zN!y>1LFdkiI2&`LG2kzfHJh{v1o;-en_q!zA6~Sux4poqJ6Ah%(gR;@Ve~w%oi6j~ z)jRQf&&T(kQ~f-hJ1vr6IPAVH{^Pcoyh46w6Q3L(_3~O-?`G%B65l@1Iy&9|va^Rx zU`MUvmbdlku=w4~5y6g<9;28#=(Q%e`cRoyC&D{jm8J2rH=P`my#y6)V=4Yo0zqM2J2ax5H` zjC6+bS~=QF?#954mT;bcjIqtS4n2}R*7@R(TZSFRby z#M6trzFo7~3|v!M=~kqZ-J5oMcv6=PP2ZPOckk5}&f7}ZDV>Y#rEAteDw9dsxj5u{ z$LZLv+25e7Iz&@$=yRd)+IzZ)TvB*jE}2ud(h}kcX-7oA)RLSYnZmzt3~h|I0#%O} 
zTAIDPPp0C-C4tq36LUyyMh2#c;y?R(wmpLRkrD&2jiE(m3ftG0+G!hnGN+> zNTGyz5WHOlQp7VFO3oQ}{PiFLhIW7VPUFfIk}EH|1bfravkB}V{Tr9tl<}g3_i0mK zQ<{}Vb;g#JQs?FJ%To7*%>hyn83aY`SsJCP^&wIyEdUq<@*i^RyU&BNlwt) z2R)CupsJO(Ru8upJvF~71??Iw9t>;yjWO!vC9>IUMMRo!-1m5^FPA>KPyQkuiW<1M zEaP9|a_n|G_;$Q>9sP2tAs3~J|GxE$({;06W8k*f7Nnlkuf^XzTnLIqc`8N zq0-o;lE<4f<=acBz})?wFLxr_oM6BPX=L+S@5})?GYRKiLh^$y{TtXDT9Wl6krsRw zQL~Bon_jZ-mx5LEE`blCyCi|G#;CNBd)(PpLyPw7n!~NvT8`Hhb5#+KkZ`OC0?^^WS|CE_NG#_xm!f#DJ_3Nt?>3ytMiI;lJEBoG)lQVaU z)MES1HKl{I8}BMBcOCAl+-{89B7gSCTNqIUdw1nV4l$GUD3f<9?iFypQaX@MUzzEd zG7VV&v-NRtB?d{Tg``U-!r{|i=vhTr%1Moznb+e}72T!wm^K}L^!cusHu~$1c_z1v zE5o4)i)rnCit<$h!LJ1Sb$)#Ab6xX!c&%}vdRUUTi9;#c6JMS<9do#UzxsHy6ru?} zWcxU@_xe%3ba3Wg(6{X!QQ%B7H~+(}R#w{4X>9Q#>Q}wN)CsUF~_8?f6wPi)$P|+gV zZ3$JrwDuM7Uav5b6X~(my&KhO*>^3U)+Bp=!J93 z!`>)4W_z?ZIrd*@c!;b%k3aHD^!x%QRlIRwpE%rd??_(P9Q-1ExE8D@x%$0F4Ut)U zKlD?;HNq~!x;dI;`Yep^qM0Re1D^@ZJ|fTwv3fd=hlJa@M~gRKwPsxRCthRRQQJYN zSGfDTVhhc2a*T037md*)iFknTJpO7H#|DiN_$&hsKA#}vayGtmN;<{r=u}PuO)NHWuTfGIX)jQ94nLNJkl49nPRD7(c|Drji$Km>i8YofCn% zPU4QHPVD#oG}3oB{U#K89tPcu%KMBos@Ic0dL&fr%SXN@;MD=2qfqlnwT^6G~=+o0JGp5RXK?IwxCoZuTTxS^@y7?{H$n8RkDqAvwy*u3 z)h>dQ)+zf|D&L#)l1@QS%g)p3)vo%8w zvH3AGzi#N`XN|2p{X836?*xR}0x|{zorPD)zn-$!7M8sh=$wLJE%kqVz!r&UO1x4B zn*BS1hxY@TH%?7=agA#mq`e67!8=czt4O}w1~K@B!F0FVug09Ys7txucSHJhU`Er^ z%)C6c@ve@d(A&j5-!F|EkJiPv@75wTSlaPj30FQY<48TLjg(U!vqQUsd4ua;&M{M| z31|*EdxRvbDkRsCyVGb1KJ#=oPSF}a)SG^rH9Y#KhPn0Y-2J_D`V8@-2s8(8k^K9W zi&7W2qa$sYEMf~@7Q*UizsZ8M<$8nxB`FD!dF`~D=CgjUCx5dG-rG|JF;K8MO?ie# z`_5#_te~tWta%ba|NKEeh{mR=&n0DbP5vT2s9el>zjdVK$I;Ew^~hm20i-4?&^eiF zr|TvNKKuT#bH-zLzi^!%wwY_2U`Ne?l?0u$yjWG>np;b~WVWn~_*OLQ zhVF8AF@6IrxQxTj?3YGwVl8i&`tG|N9nX>;x|1T`&BtUugZ8#Jw+f^_zPM5T?gk#3 z-Z5$tnHa{7V8b%a!hi2Kd(wYv$Jj!JgghIsWkvTja%VWh7vws<@HvpIlc{r_cdwXo zotXFS?KUBo-7cIP|W;;9rT+ zn6au}fL{UHhon*!R0s9))H zys#W%K8Ij#U{wz?n|B20#dd$-(x@!%Qk9f@lXWX!Ogb#C4N6;nax8~J?@^_<95^Vx zMoY#crR%7mzOp7>+g49#(3C&qw-yafbtly-q zfT!Q1sNQLP9@L!s@WCV7VZLz(|B-pE?k7&P*hnGh?M)61NBWftGlyO|mEve1@JVdPVyb z(+O7GlA#*VzFJiH%@$>mSYk<3`uSXt>Z{$Lrr`KaUa>tKEEsFe^HY|E8wEd+UjawN*C7K4+c6Jg=xBLnS+B#|ZuN(2LFd9XQ2~t_iDboQ-1-7u&W29bF4HSx0Ie)Z&2(93 zHF_qUcxpyr?fzcA_CwEQ;;G}6f^XOxaOD;fIO#KGMFS|X8k_Fr?_uvXq5e+Zggla% zkGGM{dfwsFH~Reh_`RK*W6(}_%-5**3E}$MjltKkano?KpV`zjSs}h|S*Q89WIPvH z*KNj?dsAu}sndt+t0(ptSCLuutlD_0$C3u}VtW;zsFcUx17=;Gevv0?m1AoC3=dn5d&aa}fKOVD!qP2{0jK$t1X`Zep{)#eUj(TOUwbp8aG96d7q% znCy|CY?~^f>+a*Ha-y^WL01v9M~n`)SKVZs44CR|MRs9vI;QbmL;{is;>zjuL>`L| z!n;j<6sS=p4HLPu`YE|<4dGwN)=Ez@Ji688RV!}V3H_*tlE}ygGrn~b63Cu=rLTr8 zW@97vniw!!-sgGbliH4<(uT#jeQvEiMUx#MtsL3{TLxUjkQTO&$HYElHVe9!XMTn*nMD;$q|CJUmEM))ki z>mLu6&%HhLJk|yeIl5k^fQHU2=#Mo>|NCbFA8?;a(i1ze5-fQ>x?cjfFgl$CK4oIu zUP*0Qqz*`THgc+l7k@Eo!sTFG$o=+D?E+x+W0c}sj`*PCemN0d!$2dqZp$AK)a zy!Znx{%%(*;%@_ilngWZf4#d&RPF($nJi6W z-U7b60k$Dgjk^7(M6=^q2V;2=^X+~EGru|KWOg?zzpz&Z_RdmQ@t^USA389X6B%T9 z?7DReISj-=ngKRMS_W9jiPr4eUjER$;IyQ}>Yzf)tBzzFX6=iLjqgJs_;beCytQYq z>rOfOt2GJQr-!)OFlUArB5k1e$e2(6@i0sJECMTG<3iIsYTVmZ!#jF}&!SJ;X(3tm zpjT+7hPrBUCgGnKZx|nhhW-IfPqML}xm8=(x19g+?$qDH9}UCk6pgD3*`=9q?MKO+ zb?brvKo-4uR^Z(6k_&C6|3iMnz|dB@=VB_rM7`<%E_lBqZw36wp?2Qx+ulr7UZ@Bb z6cFMH=YQRs{C&Tt_ZV%9=xDv$U>3eA)NTIuA&wa#bY_=liaCEyBz)sfpld^*ZQ~t! 
zyb&pa6=jDE_@b6K0(=!L1^3An#tMnCf1zB^aMd6$Jul&5c?T9Af29;h{ypNy;Sc-m z;fw=-$O;Xh6RZG!<;4og%XlLZvQAVX%z(cp0F8h>>_T3;$2(~XGe|iEZr=W~nY0|0 zoKhnE71>@{YfEhAhb@<_R9YA-9^k`T9$M8JIu>+^pp1C+OAUqi>Q}oJ;5J)p2sVjr z;Rd4Gp2vJ+YIqw`Vb~LXqN^PJlvJcL{l59};Z_XQeHNh5wxy#TE}Wxw=tUCs`pQ0T zro0?nLL*$^^E`}j`SP!OCcuktt)A|ni6F1e(X--*mU-gy`FGzQr|-wX9yN$(5AP-P z6de2nhpp?hml~pEmsGJH=|5!t?$$igYi3oSUfx@Y(g&@RIk9 zsQm&7hcy(>mS-i#y~}bsQx=IIkV4~1gQVBTynOKyovxU*7=ulu!uPP6UvDD9%xUr+ zPJRnyh_KEkD)pOnWMef*@Mf35JoZdQCnLRBEW+TIJ>yG~T~uf92;*)836csE<=dOk zi0NENPt{Fo>;%UO8POe0vMLFbr5ozkKqTFQfiRAmFQnfi@5UR<(z^7qwXpX_|7xNi zP=*+-bl6!eZv`zaCsCtzKCR9iYN?h(4n49=y*1V)`RADu(1wlqbnw-#pmDf1uIt7l zkKlVM2JL@-&?!i~8J+=N;Hn>@>0A@?2Xv9zf)116A}HXivEJeWw9pDoj%Yl zN>knL3diXAbv`gPcJ%E$1KNE=d8QF;+U4JPF_E@A;@GQBIK`s_xWS(r&tOayS*=%> zof3mD738PYX4X?VIx}a=ixuJd$uJ`<7<@nuYs#;k2CgiYJi#^BDZElk#8BhPj_fRYsxr_FTJ6VoFFM4-+c$ zq{UH<@fotie&w1YqW^jvFBBG)zJrl~bM(whYTk?}V2hF%5dnTN?-H9p&g1S~d4RfI zZxeAOM$7TJcQQNsnWLM=?KizU;Xa=HlWck3G{+unhjJQig43Zd1xjTEj}+|fVYq0p zu7ipm^5~Z#Ww2PK*$>IP!+hp)JR7eHQe{Gc;64`yY1OHX1qdUMH0VNyk>PhLMPKp? zCsAW>{O4lS&Uisg?57_LNJRCqa};h3X+Ou6r0an@PtNQ`6&9JczWuCeUcKp9E;@9a z&e+rAx?itBnyFs&y?DTE7vqRC0T5;grXvvwX*O85^1zf3X^Ju^ZMc^HmMig+yl%^y}?%o{DU zBl$z`5}sY3nQuiFmor*q*!;+ns;=!(EFzKXlX=b%0@GX!lr)4|OhfGtoB11d#`p7p1QAbr^ zqSA3LBhh@|AJO#67lvuSw{AfLu_-3wsMWAtGzO!=?<@!%@W2$sse5i1FFe0p{A-GL zx)_wbLmVX3tB@OJ2F+$)-&f-xYbRZJ16E=+1y^3umn+wX>6d&p{FU)wFQzMzU$!l{5pc_;!`y= zB+Gzxf__xdPXuEos4LD6@Eg0jo#RNIZR}7RpsA=FXGJMMpF!j4eje4A0lO&%|54Le zhY;6C<{U6rg2JV6_cWe;$=dW4CvdX;|R@@)&X8k@1=_Z4AvM`ao97|T{dP=f=P zK#kKB$#|JD5r>|qzqy3>E8<6=46qH7X%)8%ZU*c9VGSoQAG1(cZ;$w501Iac5|NbC zB+Tme!A4{yvvkF6!IW=#!}W?G(6iwluGJ2vwJTp8*s-tezJCkFkS5H`bT{w@!OP@; zTg%y9o@dXi`x}2R4_FH^5Aw%o4fYpM&{~eL3X^vqRS${^VBDN0nXGCp?oe~^;yOfQ z-6i0htD*1CBG&#dWR#Q-={Xd8|7DPNI$S2PO$LCLQah4OF$LFc9-BA&*M5_A3Vq_j zyc}S?wD9(!T63o+qbnMlN{N4-Pyber|527rQqexd(-nx}Z~9_Gw;P-enUEogVWdROD#Nw^I8(d@m@W2w)+WtGF-41V+OOzo5V)LIW;GJL zT1J9AAka1xc!I>SDo6nPD8vCUwuyduJ0bz@71Cuh9*7KGY`&P4#*$ytZvfoC)f_|! z?T<(`5YxkiYPv2)PQYcukekLPp`!W9Gmo_zjXCjm0>rdBU>1U8uixDu0d^l>aGcT+ zUdII(I4z3`|Hjo2dTn5T==p6p8D9fnn6AsJZVgPErdmBcAws=DUDg+wllTW1=0|aR zz#afTf6Lv8G(@@m&<{@z5h~D+4`X(-Tk$7oK*ysA@&p1>Yxs7uW3NMUV`u!=-G9qHf8}rGeC;ViJNrSDuYZV~`WJr!%YyjCt#W?u)ya{1|gy zO)pKTYB7%L^z->(db9wTCKH1!HsZ-8Fq`DQ+f0S9IW_+I8Poh3n_YZA3@oVIJtJ}! zIn(df!{KrQo3X!0x0STA5k{>9-GD9uB%kg%BvljSZZg_erU$BV)M9%ke)V&6foUdqCK-$q}%EL}}(cUD+V>|02 z#@sRGs=Oza{(NC z%0D(`Yhg>*`we<7;TU*PcplZ`*@a9pvD!Q4FW3N{=O91xch@O1B+i;uBTZBj{8YQh zp{nyuZvTDR+^6O5SjAD|fQ&<=sZ>DDECDr<-f+7s(@i1U zs|lBGVVAotjlF7v2q=L)+yQ(Gg2N_pF%m!omHf;N+nG2JDU@nsujmb6GE)4Mn^yP{o;^eW5khk2d(U2(73bQihp3Wj-Ic^Gu{a~jJ9wj0LHu~-v>`AC`U zpB;N=HNiUbbeGBDVT^d#iV_Kd-Y@*pF^*7E!%)Juq+;C*ughpYTaa&y+q41c(-jNs zR58+x<>a*v{OLm2Z>@k6|3p_Tx8nZYt%APqjv17lGyPEHJtYnC1%xZSP3d*9c?jE- zI303}G5DEkAU4Oe3Aw5__V4m2^@UI;nw$(GUK@TBvD;u18Ja&L7akl2lzrl^B^6T!f9zIq`fXKt7D!Ae)I98EAzGScEZ^c%Lfa z5E=X^MhYXYfDeE**4vO$-tKuZtc5Bv<7)`$W9mULAS86AAB^cLK~b>*Z74&ZDaEBg zCW)A^k*B3%vgjY-nXyP4ZwWh4vT6M2Hy9GKygAO}69XeBrit@$e45f!Aw+t=hZm4vBAgjuX(Z$)+5Dn{)m(+4JTwr9rd`Fw?G4f zC5S=PFi$Qu`zw7X?E?wCq! 
z;)gd6=sC6rp|=*jk$$fz+0KEk;~T3dQI_e&9{@Y7lY3l!h6E+@U9ZP108Gjs02dwn zt>lmm3b!GE1)k2ZDWvV!3xWnU18VttpdHa*wSv57Zro?(dG#_~&;48l^1#i15Xv?P z%EW$zFhL#wf`r8B{(P#DHXS~yK>L$k15|2Hd*GB_g(CCpnz(J*N zQ7A!J`*ByT)ef*@DHteX)RDemxJ8Eykp@nu0)k|#7+FrmsDZ{7w#YKj#lGQEi~Do) z%A{Z-ciJ@POwc{3QOmL>u5sB1LmO}=TmTwBW@MIIw{b;@dH~{${mpA(9VG1hgCjw@ z@^IB7DDEZ|%KqRb6xuYWA7F(=*92&G{LZ>)EzFBynR-A_%)$AT9)S-$D~!{whs>$C zNiDH`M!!h74Q)xaMrX75pA^ymzRLP&<33)|R!JFB_|z~uWFn17nBFMGBRi#1@B3~I zY;TE4s>BTbvFhq48cBsDC-^?~`Jc5ce?$yuBht zrettVnojZ|81mSW+Wnfwp90x4Y%o+CVYNA>Y0em>s1Ziw@*Yz4>Lf;dlA*!0P>tN| z%7{wPyN$~3ok`0Zx!Q{QZtWp|FjTl(N0mS#x&V4P6YfvYU?Bath z^(i5E&vcu@SHtn>AWG$%<((HOw;}?u3PxtjA)qj)%WH4^5RBqZ;B2g$++A`Nofs-` zHUiL<&1keoXe^0rTUysn?~MmcqNok;ipt)(QTSfuOuQLZuSifO`N?DT@667>zjuOE z$P`2nNz=0$p-lhvjPSkGdK=x|8U<9>I2l7|(kf|$_3>3NQLC& zqkE{o2lZA(4(!LSVlu+6KyY3pI>wRr`*=}9PP6m4!UHu$%XDj+5zpzqaYru^)skC=4G5+bC!>mTG5I53Q zBH}{CiH`9DIuh2rk2Y1z_lCyt(FM!eg?S}X!mTRTg75v_<{-PjND@|S7Est-2N-Zl zqP%aoGHHpf%MiG^lKhCmYJO~5^_%mX+2{%BO)%4D?@^9?fVG5PL*^p>?syI9*PnlW z&h=5UD9C@OpHY`)7*{vOrjVMc8cgMrZhDt)L`iD+vPC%rX?~^^aHD1o;i{L2)qA2` z39pD~Hu|ok`ny-4K0v6Zm)PJhzv5uJUo^=&=?+Voj@)RT1vnZb&*KOsau~jK%k}04 zZ=+zm?+l_3i-fa2DC$ze_6;Tc8{^N&OA?oJ*(s))@#sat$27*8(0%Kz!PGZ;D%}Sq z1W)PmM{N&tweFm1)-yhiL*>+^(b2Pdt`6vaoGxR?6tw}v>33xYLDj{P=BF}k3Y z??X5wq_cq8j%bDdChBJL?)#MULiDw=) zUI4c8kv)-K7J(P;D;~fpbi~A~KP3_{)}KCoR}b`=n=|khY+&jWEbjh6Iu}4un{XoE z`SW)8OV*_?v-ur?#1^m~qAQJc5jG2CoLEew+5AlrDUm@Bl`))w5yle;b<73Ct@Rn7 z;~d(g8CNdlACY_8{MBXhs+HhEeW`|u>v4;ZyfiVNmeWa6C33bA0Gb->HT7_ho9KdO z{(KHc9GCuU*ptQmjz+op4Vf5sb!3>jHXZH#hp^UGow0&uK6n={PI6AJN$9>f6n{u! z71BttLh*-U>TS*wmwXrzj1pO+vO-)iqzoBMMe0Ytrm6e1@+B*_l17!FhO{9Wj8W8( zyZdUrHzwy5vb)0vcDpYrNyS&FkPune+ln6z6#B;`;2^w(=@0mV(a7Ky1sbM6(7n)er5VP&Xx=9Pg9C~=ec{P_Q3sv@l3JG@SMybY?nejtpfeJ zpSO=^+s()7!pjc3fvXAW3guXOUrDN?Obc;{R}&j7FjFp6k7|<>K{lC1GS}NpezZ7{ zbRH(dSUv2c`_(p>gD&|TzI&y3Fqj*xq1?6fovl@k`X5|~_CL6g{3cWbIu7vkkaoI2 z`YM`qo*yMY#K%ZzA7 zXTr0<1j5rU9exr#4HFQh(CGN9QzhL#pZSuG9*n^w!O03ENBya|_1fi*7RXTKn6xP> zYiUS|=JQ+c0ekK*Iy|HOeQI@xTQXp0^oT>vlq)?5~NaE zF7(7cfkc$5^UVVQRiYGgZQdqN1&@xUm(gFAw_l4z*)yo(3)t86A@S)Om zA|`z|C9j@R!nJm~6X*UT!V_Kw$SWJJc^h9+;pf3lZBJ3_{tl@m71C;ie4=>V0N8~w zgJ4UnjI@`s2skPBMQaAowBHZk6#m=&svOcM?v}+u1Vk}T^R|JYrbtio()wJu4EV9M zVfP(`00IKgqY0hq5iohF1JIii+-Ew+q2n(Dp8W-&d_=csk}XzMHBG4@@QOf2CNwPr z?|p&8rc)|a3t%!=Z%3hlztS({$hGL4MZ#_KV;hB55{71fM@bTgxd_OxMbY%Se5OsV zunL@kYb8~Ww5j2_?2S4pb``yH1IPv%fzN^Vs@FgOJm9G5V%D5G>-8g~3~~U;m>_7T zt#UiY7Ua45Ym#Gke{+r}L!tavs0|4{1`|1y>)sYmbk9@Cn>hjFuD|{`Z#sZ&HvAO~ zV_@0C7GTeE5W*+q_5&;@av93^IYVGd3OIA?ogL5c%iCD@ADF0q)qT6EE~Vux61gWp zZXoSfNG}~~JNP;G^uaF-=2%=vQ~EwLHH0DyvICig1k(K(sXv`P0FoLm4lnJaD_Q{B z=CIp|pj}KEUr5n#gqaJ^$J68k* z$lc7J5}#&RS+Jqt6tb}9@>Z^j;uB3|U2V(^5Kq-5h=4B$I|wSPM}BkIZ^{s1J1*l@ zy1Z<1CNSBiW;z@Fzq%1oE| ze+Ekb?*t|?_(Q^ol(-3gRF80X;*_c^K9S^41*hmXU(vDdlu28oP+%Kdp)%a|HKaLA!;k}%7M&qSD7%mP)H(3LQ=F)#`z1h=@-wR zHQ!mKh57buA1i7t)wQYf6*3*-Z#RV}u4*pl(&^{nSY2K$X{SlE!!DBr?qtf1^Yzun zl{bYqj%kj&$*Y+Sb(DDzpT9xvLWoY!>Wi9Y+c@p1=W=F$3qBk_+o*8ntYG-bT!Q1B@?|M%uP0s_Y+x$aSJfTU&%{7rR0CWmEjPHz|u!VFqwk`sBn{Ex+eC z7%83tKI5`j?+j%LxrSUyvdV&QZ>HRtAnZ`>wCV6q;WZ2#|f;(+PG+|X?uDAq$P#BaV@O2 z7LiR!8NROt_)RfSeyQJqo^z+(0C>|EjRuB44F~_AxE6TdN$;JvKU%U^)ZKBHxwpx{ zvNqg<*u^<3J1nLH)k(!TLpCYP#(Xxo?WoPd*-A9UGtcYseeU0lDn7ztHQjje_6)CX z5x?F}k}`(fM3&=p;U}4o2l|C6IF2({<9yt7_#Xj}>Hi-AkCKsY_4*)c{(4{ReZrU1 zYbY`sH7)V^Q^rKzzd9WwOuH%6Qw{_?-82geY*rpc_GS4?6u^Grh$^tgnr0;dN*$hC z6Rpt<0M zJbabSyEr&{{e55*M1t|L!seM>Z1j`Jwn!uGNL!Et+)oFceNyC^NVgHk?@-LGD{kny zE8UnEIV`_D^d?vBn zUbErqLmQT0_M@btLgwXsEg00#P^=*_gLae)L4x$BWoSvi2B9{bUU&&?mGct2{f4-J 
zCd}~oLh=^HxUYWQiZU$wV?wJA3n@Wy(?{}`Pag9XF*h|{YAmXBp)UoASyrNu@sh{; zKT$hEocKB-0CJ40a@#CBmDeM-vC7ooXGl^D0rhv9>2Tv6Gza_=33lq3tAn^C?6WVW zLuv9RVHi0URN2a>#~V9@cksrqJz4?Q&uITM@ipoDft&|*j;zJrMD-)112*>QZ-?*bRoh6g3Ji@2+y#cHg=y$oq;vX*;fe2|Ysv;Ol< zTWN0d%k94YDSq?(rvq%%MX?=?;$g(pe$)>S2TrYc^+3C)z-rze+%bwJa5k)9G~tm3 zhV4Sn$CWFwVX4Ar2Q^c=K4Sp$^NOrkJmCEC;qbAq-7=)M8{KxP(aCxW2qy~mZ|2|r z;Y-DrsLl<-p83nh8T8H~q0K%;LaF;3(<1pra*Xlkv@lk(G)g=$sSmXW%>#>3TQoN! z3lti?ev6)u_hh%}7NBv`;s9V-(uTIG7TDZAA?;gRl4fqEYM9{z?E9_Z>tvh59Q#qd zjdNCw=yIeoJx?7&?a@e3yP~eYF?L~*fSUi3S#H44%b!#a7L|V0F^}aiXHg~Fk!?-r zd%ndCC>By%FFKl7{?o_^|AW_nJeo$346!JL8jk`QLedIIQ4lX+M1IVoiMSrT1w*_& zsSNT#|3EvP6eN(X;gXXEDAke!LsJnviqyQY*U$9`oAs;%Tkw@Ks0369nuzr52*mzw z8zkNL41RaqBMj>Q@%T(p3wtGbi$Iiy`~Hu$r}Bibrc(DZfP5X9Tdp!gMLr$Wt%c*P z6x{>`7s;nXv3!*0{z@!2=*T_qY$2>f)=MqTk!>!1hhNg&@tZMVGbgA|gA6T93oy_8 ze!Aqc1b~;Xr&=FWSicd`(q95!If`Dr$*LTi^&T`!L5k=hB+^e XlQK>nl}Qy zw-TD3^=z%w*Q&nhW=G0MJ;w&e9;3zoRM*C=5)xssD7Mlq{m4Oyg!I?uXsQ!G6MU=e#hmf- zPT_WXqIsC_WS5JmM0>!0gJhVjJCr==6BDs{_2+NWThl|D7v3BhOVi5Wk&=r#HuCOH z-qjy01bwXHaAHc=;IOUt;^i<*PZ2?c`rrvtGy7SA6~gl@pTDg6LVEO~hLJ+SWo}NG z9pCrg$)W!ZwaK4KvIOZcp)u{Y0NrFncg!Ct-y?s_PbcyyY%OntdtP;tf!8FsV8qnu z!vd}BX#4=Sja7O98(z)3;r;vzx!qev(IAjtG+whFlm~$ojV6|Wwhspr=JZvFN^Uz%+C{91Hc=(oRSij(^ zpmkf-Wf5mHI2B4w7HiVQd@j3j9PI8x6%e~&K*re~YLoEqE(?=4dr}?9o7Ag^$p=~3HsH(%_uoV~hm*GGJ_GOAPk46zr^}PElU6jZu|~TfQn`J`(!HII74V5&eOtSk z5FY_Vt(tX+2;xl)hARR3$hUL(=fgi8Ksh+jKDm)X-1I%1e zy-ZhU_GA3+#Rtb@%zHaMCo;m5M;D6R}Cm zM=)rnA^1|Epf$;e1)E2#DwNE4R_w@)eahQhWELYF-p-e1d|gj6R<=3h^4 z)IyB}0w2alYenYxrH(CSI#mBPUXoAfO}Y&H*;|g&f?od6AE8xzYgoI=yt-ub(9d*# zIgqD2KdX1=2lK$BBe~A3XA|QM*|dNCoz~~P-p`thmsJdjB=NKc6MyGzQdz(FZ@!dC z&2@hywh>u9F2`I@Xcu2tsf1Qk_`B1%x_8ZWz8iV(cttZ!pvtBO<=cXo=*I&+ zZv;a&+2UwWzB#=5O*@_gzSsRUqFAm>?Wi*O?ZXIsw%|-MDm?SLy1P@-kremWbp)`? zJ~-v)&}&s1&RU^p1YJ7rl*!@^ncN!OT8m8T1@aPsJuDo%4VLn83$oXi6A$je<8X7z zwx<$%XKWP>&hDud=BsO*Wcl033>JY`V^L|PPJ(Rg zuMSN&Gxf_65~5A-qS?ZNNfsyAd6bs>{gg_60s z*9jAd@A=9x;gY-y$9uphmiq-&ACz6{N;n1ZX#8#KjQrpgXt1`&g_QruBIQf!!+ zZm;y2ZIc2aqub&rZ)_%1)L#35lBa($-?heX~^?~*V=w>8;GOhDA02uK3ebUq9ahj9pbJyg6@z6KY zgtVHF^lLl7p(oe)!nm0dbh+F^T&{da2mle_ipVdG35QDYOesFGBL7*89b$ zjq58CmBe^mDG5MIq*uK->9pwk~Mx;aqFx}FMZsO7}Luq!YE}MzMO+rC-UbBEa_pK4l z$-dv(HsGl_wLq7b@ePiSt5V$k~`dLXyl}%t#-r4CFI&B@GFV;EJM|yJx%<*ghrYM4M zuefS`nZ*FI_g60-=i8}H@^g5|M*H@vUF>e2C;xC%n%NOxe!^j$2TiAg2fIaO@tB{n zI6JTr<4cbZmj^%?*mnVRL&auSV3RXJ^NKacJs650e4O#XF%oi8ZaE%UirHp53z%EwjTgdU-3?&A05J? z28?ai-}C*pXaQyme)|Fdy2-*kSbDgqJqRGNLcoVYY*f6o-h!ThA;Zt7c>s)i^BYYRHc(pzQnp%`0|@mJfEmg1y;VDv4k;? 
z(oy|q8V2pw2mE+=ejFvlGyP3B5`tyaM&mRmoyZ>gqRPc*i7byD z4TL1FX5SX^R^1uCJh{O##E>DP)m=a`K%QeD(`cDdpA)8d91^som;2WJpM)^8@1KEm zwWVxqr=XwrNS*Ezno3QP<0I?*omCdj&J!c%>FJ6*#tmaQ8gvcO56?Q(U95rwbSzyw ziZH3NFEm8J>A^ZAhRgnC5UWty`z;M-opdL)#`$(|WlT34luJh(p552S6$*W#7QvL}pOXLgV9T2`!VA|B z6gHlc6Y!UIV?fnZoP3HKri3A-pfu`zf-&phE_tw{J*l&i=xdS9|I`q+1Te*&3%Jq@ z5LCYmyvA`06(v^bFLs#jSK(0TL2JSxAgv*XBxoD(g^T13r*rc~TFa{Z%{m%K#>yEAi%QCt3 z;&L~Jpn|yNbSgf9>wn|ae>`Ik{ZJ5a7$hmbZ<4kuDl$|lCSAqH@w=xu_k28?vYCPS z-%j<*VNPm_`qN_<>aQ*V$ZI}uKZ`6DEbs3Up%YRsGBLC?-#u~Hj&zx(JjKN{~p7Kyab6Q-#bkq`^gfSy)RdEj=Ft8Fss%vNP)>i7>B03) z3-0bNnaz7<=FGkGeWz|s-MV%ELJFzwz4uzrdhCaa z`Cch()2a1f6HX5#c=Q$$=;Y*44YGifb|oZDmOyS7;BR~A%&+03>$H!KK&AO3Bv&B7OxD<}{ ziRR}(lRaio&r zUVH|Fx+v5DmPH^2;sFr)?m(P_=!Qtoc>LhW7W?q!xdTBs9NwpSC}qR)sXU-LlY)?I(AC32k&|wI{>1kFb``H zn3}BVvSCtGZt_SA6FUPesj7d56L;FWam3({r&Y29-_mD{-sO7Iza}@Zcf>?s-{GM- z+*;!vbv2fX&^?dVv6DQ(<`*=R;rv&*z|a3@xj@0U@7DEME)zqwDntzb5r3mN1*V^@ za|OF)W;P5-!#h5sU$+y!e;JORP~5tCoGgngybvab7hQct_EVn0b4W4+tZv%Y@orn- z9X7tg;=wkXfm~e8fmGHz-v53yQvc7R(Gcl+74D{e!T=NA-hUj7Py*iekzJ2rQAAFO z=HqJg0Y1nQp+{->JA~%vX`cA1B|l2UmpU{ z@&5C?Wi)3`6Tmp@MmVhoXM{Q^5X%yo9p3;QTd~2Qze8q^tBUc|oNwa*I5b@#hk7-s ztQhmk~2*f8adC_;82bi+a2JQi*BX+-x#}{z| zJQHssUNic2f0ul`wgQ|_)95l-tS!IP#n9hJIq~RO3L7Wy%k@14vI;PzXscNrWp=`9 z@H-pV7q@+UQ1^fXV)H-9%-Z7!;EP_{ee`d8LN^emD&TLZ)p>Q50UbtlsCc7*I9In^ z*m!}HdH3XgJ{u9!fP13d0_>q2+`>crcKfs27)2&5-NY+%IsJLHoLS{k%RflyDDS=V zze7T2qzHVr1NlWVb296))Vur;3Z(2tj@P`;19jrCG?a}zKBs%a3*!64{Tqt+Xm4cw z;jWAuZ#w2oF3eG>dZMTuY=*Dl#jY!R(`JF3c9BM&w81#!eA_DqYcfr@Rq~8h#@lbP zsNiQ6B)7GtAI2(UMH)swaNTcGSIJO|^y!#-K0n&tl8rx9d(E`htjuj+1Ed8qc`sg` z1V=D>Q7JL?C``MQhVrg(3tPx7p?X-vw=aLRc{^-4uK@FEIykIUc>C{O88$sg3uU$> z_8oaome6MRJF050@Ps=6vleu_kAeKg>-B^Ic!#$uPEXuLlOdf{K1k>W0W<8gvKqqv3B z#lQOAxgr*x)6n>j`{$>BL)rh$Lis;U+W9z0h>x1OtlY|u2D`sybF}_?0D@}TDRwxO zSBBr@Utcrkw~>s*SDwx<%DN4A#%h3vZ{z^L>9W^aU!W5RDPGG~cav420!1+};KZ+U z$*QjZI$%Yw^EUNbp^qsu`?{Yh(4Ou^V(~sgtqfmM?MrO4Pq@O%ASE&cRjg&24aTQX zg!fOuX|$bawLvCLCMW?O;0GtH~zq2k8tF%#0D;>pv}0#%f9cAw|lKXma!iju%21&EDxz_#ufe!lN@}L z^W@F@5rS@+oJ39~=Jl#vc@78pSa7&dfq02^)^#IQ4?}%f_0J^R<3)8Kr!RIh*5&eK zhre%m{GVp`cjL&C`v}+%E4dZss^)*(hB#%9Wr?-fz<*d|KH@}}c{m^K;k!5e_QFq) zvj2}rcnRgSS9d*p(+CJT)ud%r-y>^Ss{YiYkA40X{%i;SDPBM?rr~>R)NT>3Zns|T zzzk|h)MX)YCOy@-UmR{wtZ}3=>JPHY%^PFPUj0c2h66K5!}x8pf~0wia>-MMsc_~; z4aL86`?O6RU+|I+q17lwVVRHV%?Y;pU331oq{eD1(aG^rxS_tBQS% z=J&G--RS=h*69DpZtII_h2yGzl&O;Ok>TU(Rn?<*4;|4jBd#+A2^ z>Td)-jXv(lunE8Y&KiFJZ8(tf9umGws7DVP5*JM&X5VbH-N%I; zLtC7xsQ)m+qS}^;I-MryYJAgL7W@A_~?#c~!~Co&h4B_blG;e)a6@6g$E{SqZpHc6mVt zk=Q75N6#5f+RL>lHZIUBW$+C2pBH7X$^D0*METa2@88K9x>3@~<|kS=fbCTt^=l&4 z`yPBHWud(R=D)MpRH8k}E=$e5#COfDBkxkZj2k}3xBRH`8BrsL(CN(x*X(!PJ#L!% z)uv-r*U0*MPed}`aE##qM8c>GgJR6_^QFJn`#*6G0n>*Iu%CH+H@dYoVI`1SKi zJ30&b<1i2=v5}I@*qS2OBCL%4!{f>7+(~yVT;Z_V6DfP0NafpwOd^kDZbSQAsQYBN zgBhO=)chl?eO=g!axiQGa9zAsR;kMqdG5Fj3pxZKqt{cfZ=LIs|CMLw-NMm7?ph$K z#1+CkTLQsOcEu>j>>;|_*9QF9<0pWMfvJVT)KBRi1A)1bQVNg5Q=YKI!lk4yQD%*d z*KU_bPtA5=bPM1;o}%*IuU!ov+`a3j%7pmd?v(c8@<2Tn09WCkE>s#0)ZYsMcL#;h z4%Rre3|3dbi%#lZgC%-H!8;Mzt`g^Q!#1lc5g=Og&LNyW8j;7j{UmH ztTQ0VAA1LLpbgN)o1ZH39`@f!0ksL`-Jj-ho`N>H?e)t4dJPYv5l74tLDv#?nBlns zHX;lrJXf*9@1L2Gyzx$*mtpac`*%`Cf07;e2g#C&e!Rd`hRecX)&6e}?~VeOJv%up zfzHDyyuhI*vyt|j&FoLF9*z6-%>32z=!^%yzuN5{y;|HmQd!$#oIl%=+WLHj`o8d_ zRQAus;et_`fS{nl29;^RjWyjHI&3;hLEkqItLoiKjNI0=ThZ_v5tHzY-HDFHL~9)S zhg3aN*UyTvmV=e`6RJrvqHgQTWO?H6>tX;|WiE;80Z31MH*Ht#5te7NMzt|0@<a}H{jty z#)2g+1D?pO3|=b;E9A=5_keNDTsC+nw1}DiKG%EE)w%5?EXL}n>~kCe{bg~wX_SYw zdh&^w$F}B4{B4Kox!kJkHB;^DzsJ!W7Fm7Q0Dk}v;6W|GnXOd2*e0iH@|~BZz(@!sup|D4F0*#FbK6s691=7HVe##P 
zl^>cFlAZ_N%b65otc!2D=*k{nB-+FGx+V9LZj*#OO{WXA9o_ZzGnCRe^mC^LKTNx} z?+IVIVC9O3Tb)noZYTHs^oz)A3tXg`3dM#r|G0>;*KYeBj{Uyjr_b+OymXh!i+N(i zw3YeB%U^~6MivbiLt{bj^RMkCPLAKy?oW}NO_I9R+M%tFB<%Oi5q5D@$>VH=GSkS- z(4MG*KjlSd4NB`K@wUO~qi-FCksoB<4n;ZDX{4K6k~sCn>?8#Zt(YCmrSASo8&v4w zcS$$2LuXx)7GlXPfIv>Q{BXq9tgay^ZZ;HaJ#vcWbDR0xjO3ZxZW?RxIaLM8#=)k2 zKU(L0FyCu_Op^N(kzEH)ae%_s-$yd0MI81(#a(%;)I3is{P({&rml?%;_zgPxR;pA zgmDR5P-M@%mE^$3?~~7K@0awyjBA*ysl1e<7o}86lc4je%Xwcq7EW_5mltOGsSC=x zD8}by&`)&(+|dS?_+~TSeaGlzw<*cATz)D-u`eH{-pASljCN`r-VCDm{a6^*-kb2a z`St@k+NJjWvBt)x+DfLv8Qe*%YVETw4Z|MoGa{-)2oYIgm9ON4F|Te@+~C9q|6{2z zg6RJbB2=SSrHY=Ph2EgI$EQ}>*eAeuqYC@F5>Inni&;+7VxsN25MnTK1JnVGd`ZG4#{0*zbSSFGs} z9r2_Y?eSZdxTt|Q)yhivIGhj$V!c5XDb2Mb=bs|=oB~Q=>i|>n2Z{g=0p?X10g?MZ zc3&$uU*fiHs?;)R_=#u`JoY#OHug_0MjXam?&Cuw7Dn_#*JzJG6$Y0zB5ep=z_gJw z72PgiL2O#Hx?!~R;#I0#$b0li%y6Oxj+?$_Ed|xv%Zv{L8pK{t-4l3$K(U7xFm{fh zlMahj52W=(uBg0aD`JQVA4O8UtT7{)lLeGn28lO0CCxCnC81B7G z%2eIAuc!^XRx`l9M35yn=o9nYeTF|-*b~@gs;s^%yr+1b1vdsFS52E)_87IQ&1f!& zV6+7wkc(ma#r`5L4j3ES3Q+O-9<&;f$mf&6B%aYg$stz=wr%dL51KE zPGhiFEDOz;gKxTJSmp`*#p-{4CiORi%K*g_7rvJN2p@Shse(Wxk5T%+h);W>xqyh} zDKqO-3uO+UbjB)(G*B9#?WT-2lh73e|GD~F9}QiJn0fXK}R9)!-9~W(30|XwrhkTntFsGUvzl^g^-7|pBv-+hrPv~txb=d#_3Na#a9uo$Z!<~+UDc@p?Cak@dqJ5v(#q4o5EMB)S65B9b8xCY|jrjb^ zf5tpDUiNna<__5i^H_X(;jfQ8m$W_r%JF}Mf()<^w>NRo@P1y~S31%Iv8}ny4#PM9 zg24sqdtkUI2spw|>`2IdFz6mQ!5lobOFXBLr-o!U9?xo^kN3Al@hIlC)(=i7zdc=N zJp`<_o)RC<@d%t#bws~wYdn0Lw+KJ5HnH_De+Bbi`svy3`*+AB@WE5-VGh`%=X2c# z2R_{g!#Oj%xChl-cxC`Pj23lBW<18@eQ+{d!LM*Mp{nxSXQw9O37;%}F;H#FX++6KF9Ca0fm-wNyAZ&h~l!_l6n* zEZVYou|r`xI2rRiX;SL`!7mLd6Xj7LF|KooNl!d+`*e_eYNqi8D41T!=j=z=j3j^` zSX6`qXK5xFXkM0!H~~0LwM(=dpHUpKeV)$Fb_HsKN3#+ zLWc%8XB;0cUK~F++e!GANO|`i0I`9pA|zhT!H?HVzDE*W(XMp?osYNnN?{_W0(?F% z7Cf{?GGhHFUHM|igVa_huC!0+6z^erbATC^*FH`xzphr(cl?1w@y(KxbgrI(KU&WbHyi3*3)MMlt1? 
zZ{~#eF2!xE@7r9mU_hb#4_9-eNFQy3L(27%sLaw(n(B9zLg%~R7CqIEVi-zEkFm7A zVE;Y+O%LWJ74rWr=Dv;VkF{0z+md^TY0tyhGsHVruJqRAN9!B%AlE6XTe&tinq8Rl zIlE9r>5e2kv?gD{Z&k zr2bs?jub#1VE*#a(Boe(9n#rKP=}vS{{K2*y2op@jwHhms39DD9F*KApIlG*aZGLF zz`uoy9EUR;<=@?14&BL3rs0n##bRV)xjylnfi<&L9Yx)gM|xuA?5`xIlMcQk^LdMG zV|TJHNK;wXDbk$@Q^s=Wq8-Kx%_$EOrg)^l<)=apf4*2Ut~2Ti1~0-re%f8XRGR@p z6EbygS(#xgmfQKCI2rNlA2~hE8a5K6Me}+L^HOu(h_v%-fzN>?#B~uqhqh_E^;v@IfKr(Z*+IpGU z@V%~);|5DN)X5I?IbOs=^DMo6MFPxy={@h*L^R8oZv2{WcuL4^pV6Q@E47l8C9Fq1 zrbk@_$K&)>uFarJK7~Y&^!qr7T%a;a_>s6zLGZBoF#}G`#Dnr~-Bx1|)UR~+pROCR zepg3^=h1~abegvYnMZDR9jAKg1}Rz9Wl{e-5A^6<6ghCZ)^>@&pBS5VMK?h@3&;6p zL1XFqY?dV}zD*r7Z4I7UCNffQM;7^e%Mk7I>;F;F&5#m}`=4JcVU9iuL2>5+I{gJD z!uS{t@T~RpKR5pr0~WIJ#UAKERf#B82gI{Sujn#ZgT~8nZ3ZUs^#^mfGt;XT&YFeT zL_0<&0is551|IiqRqoPn83=Iy<%Y;2!}!^1>#T}dch zX@KHCNq`IMZp5*^&etAqe{xA#xhg~jtN&*lj4?R$#zc}$V*pW^8_s3dB>NcJlU}8W zQuh`AN3>{{-+5=bZA+8LG+_-UdU?x?rIa53IMr)hg7}usl(_4~K*CjMHE=BC8{|{C zZaorrGrgQ|UQ*@otEZv40iKQqyyUE2p!XxffLHcZJxhR9w8sfS1CM)6#ms||!UqrY zR3Jj(9xMg7_6tAnA&JmL>o&|L$S!D0%2^@xKi4h^Iz`J`h~iGDO0i$I@F5O9-9$U5Ax&1)@+kj`%I&zNwtEZBF?*RX9>(h-De9Psm-wWq*wP2NvwFWICYZ zkxxON+HN6nx*QUiVh%IwJHa%FSgz=!-llWC4tKs*&&0IsVQL{$3asNDZF%0&(z$JJ z>Y1Otwb`iMAMfTZq&947+IrD7Bv87s7Ch+lqTd`Llp)h>K<`Ni#f3)s1I7TdjH(c` zryhl4LUcl$u|>9|f0*x28YsYqCnwkf6I(*&UHhp*wtwxbr-pw&Sh!OwKe~Ik`g<9& z@zLq*r=RX&QgkbjA;366r&H=AT)=ea&JoW^xb~|PDW#70(7*ekQIaJCNlpB>jU?dd;Cab<0ECf`GM5w^k zERiU(ZjsTy7I}Yf^uN1Y`F#GKr0f7QNY!B%{?hf4b<>#%c9OUF3YMJ?}Dx3-z`f0X28UM6>P5Hopg+hyV~pQw?eh~K;KOw zMY<-T_(HTyBUZd2j&A1!>;T^mo7@12Far5gxz&hx@~(S2s5kTqy>9MFlt5c*Y;7XW z{MT;N<6!CKUJ^48Ky}njDQ{j1xNCk2*aHcuKc2woEFP$QE6@~i~ z_v+tY)hW*3RE+%-rB4+T)id$*yaMb4@7-KOQzB=-bpq8t-nn=EQXk-d3Zow*{=Eu3Owm@y7Z;9* zRZQC_)DT~J%JRR*>6G?xlMxStaLl$Zw0e63Lvgbs3LKUGFvN?~uO*b?N?zyB1Gh*Bsg7|*Pbcu%ZxfGk zg>eLbRBE4m@EtHNxaDaahX>vfI&;o)oX93A*&a~)7?VtQf7SVS!Ex)Q?0r15^LSV6=oOZ;iz-qozeJvz@9STFCNR z^1KL1!?)!uo7b7Y-YGu(MCbWvfJIBsmQSDLKC&XrwdA^N<~;h_Nn>sSv&d@q{o8jX ztu3!dM-DrTw6Tw|DbS#7b;1_epCY4QgKSXq>gBjbJpa;y50Ye}TvwUr=(=M=G}7NS zbaOJi>64aRlw}fJPZ%F&D#pCc()eeJ?IBD=w928pRu=W zNc@)-_C2U_Zskk4DB5hM*XT~${cj?n=_^J7n-ga}(NGcuZbVb0HZ&jnWBO$U*nfV5G94ep zg95_x6+et?ucxC0HIG$;-MbKbzh~mU5?mI^X-!1gqWKPL7p4wsRS&t`-eyU) z&dkp4@8^{S@qp&KP@$e-IG-yJVWJ3NI+6#QV0GJcrP~#LX{X5WWU2&Cq-FfT6X;KP@pcr##bqoTH@<@S(AB^{W=W@HnI%YA8`ClmqFDh@V;0V&Z?EZe4xmE5$?Xg*M9k*rsvME~a9czSzW#1%oFYA4sCc3FsU zIaUzVuuQ~vr#Ssf0tiX&eTWM>t-<%_`Q~nJNo)-XpE{IggG6gVGg!v_pdwi(2;|xH z)xp=@>dlRfQ%hskFH8;_gQ;aNq%PyLIzdRYXJpPmT( zT~ZJ&^-=$KAUpiE7R0BeF1?E#U8zdjxxLjg3Q_}zk0JQ4~KOufyC)yBUmv{n{Nnv+P|= zOLvVwFoi-DN7`T}TzJJWh;ln<)YjHEAVFsu^-~(B(HA$|vZiPe;9M1+#XO0q$clU{ zO|pqZwgosz7$(WWs4_HCATIPV-F}~dR<|z@)T&IcgSm3#IZa=(i*VkiGTRvP$+B7B zKC*Y3#SLAB zawG>*IOW}=J8rNv)Wm{&jEU-yv_=~5#fd>QLFzC&@yi=FFz_3194ff8us6B#6sjJ` zz453S8I?PeR_Ce}dQK z5YTYin!B|{ax7~Yih&nG4H^Jpc*2H3jEO2c9LO2P+BCb@g$pw=92(moWz5VlV+MPo zB#YL1FsO&At&nWZB6~4zSbYX7Ac2}2F2A6E93gp(=;1*RClcB2Cxr)7jzx&SsetkYvR92VbXiQj+$Ow##`XRh8> zJ5>bp7zeZLVD_650yJ0T?99%T6f3&ef|m!H3j#lxYktUD>zCPI>`r0eDZJeVhE}O3 zFiIEBAi22o#gMi6{CSs>o?g1{akOY+-Hm~J2_jr6m85o*b>AG;iq4}OA~#=f;^*g|qIZiX;nux} z<0zsC@R`fINb%sh}rtJi@lAj%d? 
z{k_+206PGbR0Nw!K6yO}CrEX3o2*dXTdhY}WJQ3gxL$>RfwVEGgBDa#a|dMd zwP74{3B8zins#7mL1}eOL&GdKEm|lyL>5iGEhQm6v&Pr{Rs9zgaAOSO1LB9*9inP3!*demk14|KTw@WMXIWeo&4m?!=N40zqOhjm34dL8EDlKoL9d_I$H;Nr$sM=MoS}9&+zh1oh$E1B^C%ObuL5A zqA!Aqz+Ov|ujOIIee1$O*56_Z??bth&x`qWGltTdBXo{V7a*Gu{vC#5LOi!_yid~1 zM|^-dX?PdH{71qs7;7e|!Vxb{>lev^Q-~{p)G)~^Qb0Xsq(e?L_XM-(Xa6&nx}v3d z8Z&$}W5NFPrB6-&5>ovyi^4P-4@%kgvJB<5y32DaKMv5R#S)azlaIXc!Dq9mxNen( zl5*LT#YDXD5&3Lr5^1#ield1qgoKTpnm!#o1dI72R$_dBNy!Lz-G$J2tPu2zs4#lw z2Pq$Uk~vRb+WA4u0N_XPE*0;wxDw0z2L|2-i}857aW$e+Ftn}gdy3Sk5f{2sd<|~q zGi$*jEA2<;g)uoUOTk^!T|dNFw>3#g(&s-K9uqr?NiyA)oliWBa&{XV1uo0!^3L3X zO>aelk3*7H{v4%KYU*x^A{JH(=?`Y0lH&fqx9tiJBfpR@a-@+D&wKGq!`2Z>uNJ=a zq7z{=pnW!wau9{bPV|25Jb2=AO-UJ#Y_OX4VK8OlGQ6h1!K3EcPxU)LUptXe4DTqp7yYQr$}g2mdE6k<(*@CD zp|}qSnZWkPIZ|BgqC164tX;1Y+5~&7aw+ zlINe1G!0vv#)st^zmZh|&NJ$K8pzRfo}YM*oqLn?n)8tDz7a`!>F z4&$E)BZ;a1UKp(hpIdd`C0@`f8VIq67C!rUX1dvF8j0?NzFQ_|8(aP|7fqHjma z=3}gm^~zr>n205y(u|`R4SsA>m=}&JklopP(P_^y-ZW6-GJh)<8*V~WrPD1AnnGOChUQFnZ4FC1Jxrm%2Qap0CIh2;xOj% z-R;^MIve!k(0d9`lo$X;^IOy8o9UjdNMZztUut(AMG%Y3NafVfuKY;6*sH8EUWf>z0zh-5s*pEb|WbqV7aP_}N^VsRWRD8p*H96n>!?W^%|_zO3j&x;FQ z%zwZRV)`^D>Ofi0X9Zn#9X~p1Qo#{8zNY=`=!%9l88-++QOi2BDAO07F+XLW^s&M2 zCDiRy`MQu^1DOfGG4k<4!`@dy!^y)gM)pH+q5I1B;)Pz2F=z^OTYv=ve8ogHy0!GM z!6+Mx_Z?d_)ly^#A|a4k9vf($av7ts3;tsg!D>j3CJ7W7#^5lN}x2h5hQap z@q+`1l7Zo~e!Mc;z6FC29ULVAy@r5K+UBLI>T5J-})y z5D@x7>k`0Riper9hE$RHN~0H9!mfpgCxC`VU^jgzZz^6vb#dU!<~Ubxz{&Nya(-a4 z#gpA`ybxIe7(tQ4+iV0L1Qn9;3^qC%?EoVDXF_%1pt(~-$X1R!&I3S5-b<2Q`j88AkCB5tP)Jb?+kALqS9)gvtTotJUjq1~S!cJwHl<*1P}q{s z8n)f966^-Y`7~e!kgx8z{X%0-6cB^_Gy8=Gr#Z?ZAhb^aVnP+4EVIU^ioQm>Db)rk zz%kYbwe0wTI-5@cIs;mj%2p#YEO8bC?i`6qpIQ#AspPf*AHrmz^-5Xtjhco==@3`z ztDl_rG@>3XKXXKWam!v&fp!2jQW|IiU4HMZB;k8m^wgRZbOt2;;!nw<*Dkil@>ZAWy_@&rUTe8>wjK#!`)CxE2k@-sP8ZU)FSd zp0{nrEt?`6|0{FyzijjW0d*)f@IeX4n%D-(v;(_6ymkBt(uFCAPy@*7$Arr0Vfe?7 zvQxm}N7KuT=?rCek#A%ZJj~|umXSJ7qx9TEuJ4n?fGPmjUw`lGw$Ewr$jH=8hjq&^%fQ+>+x~x(-ze%?-w0r-cY2D~-Uk{3x+5 zqbU6R)YG&+QWBIBA$AVUr^#%OAmn>vfjW|yT5(r4Mpp5IaQO5p z)pxxc=7(xRH$jeF!Tb6%balN>GZ(-HgWB2@KOT`ym`Xa%e3w1ah|1 zS2Z-#nLT)n;78gHfjVhYOA_(_f_M7QcQ43|7y;oM zs1N~0h&UF8T?B3hp(E5;fQ4D#=-Y5aF>TH4)nQ_SqYEP)paadu#Ik6x9EuwreH1LZ z7ck`3UiEs}FfJtr7`4!zzo<(@?J|Bqj*+z-E(G-KnFt7TgVev5(S)WiibF&(=0bms z)Jxi#1+f}V0@d!5j;DTM>pj-hL6DA-DLdg^H#{eCE1={EUh^$eUO7* z3cmCXB1EiEk%L$udig`E&%OjBGG>U*a_EoOZ)kSQ04_z%I+ys_x;EgPF}5FMdfxL@ zX%k4imcr(4FZRkqCoHY3wjo@YK?>6e5_2^ca=#rX%dow@H1&uX!65nWa9}t8dRt3c zeTRyND_ZlN#1D!4K4c-ulTA7=Mvj<6m7JCs{xOUiGEFac4zSW%AQ8Kc!sk$(yZ|mY zqU0lF$3mq!3smB0IaX9q$}u+P*MdvVK@0N&;hC8CU9zrS%CMKm(ztzHB%p5*U{&wQ z3Ipv(yt^Li>!{ulAly{gS9r+f7mhVE4S2+faC;EF%uj^tEpTbf*{g8j(ycSGwZ{q$ zZc#zI=k3zOn;=HUgve-sr)}w`y++HXL#4P4?IAgaab4qd@)B=IX|j_yUhPi34B8@% z&VYHxq@K`^&Zi%tmHkZsFu>r|bDF6|+rO5gbGCa;_pLU2xXLH)0R8i)_wec^4hwr@ z3~X$54%Xy9WWKah(1>7nFFLr0(kb7G^X|n<$KZi@O;Op4yUIrX+k2M{w0VtIo2gZr zqc1Yq)tY=!OGm)=|B-@=Ty{PJm;z-|l=2%T8mH5_rDbBye7qp~k|um|Wy?dvd?*;M z8GyW_C;M_f{!ZK0I)ofoPLTS}FEt38n2Js4H_1;F_w-Tv>3~=X!=f4qu=5hVEuYmyq)Hp- zs>OL5tR{;Nu{rHN>FTp;>U!THAQ4wPdFv?3;olh0icPomi-{oxXJomqF5|xDg8UB? 
z^;eK2x1*Fi8yVN_!-KF39W@Qa!-j&c5zE1)mTQbsw_^BH#)xd})g8X3tr!L)!yC#$ zr+&wwN?G2pA=*t7A?!$4FRb*^4%X5|BcAePssmv*fK* zUb3{+65_r*;(lFsbs#s345~)SM+3PIrBz}%9lnFcl48Xa&o$VynU65_6eEC`22p4+ zpV3l5jX6kBY9>`df0r>;abZe$en{6_@}NXIie+|)91}=+Tech(feazS5JCT3r8C(9 zlCcY7^nu~d7Y!qM{E&o}vV_wb)PuMXEa?NBih>L1DT+y#lY7wX5Z=C&nzI0-6;m`i zFIC0iv(G?<>8NXP+iGa?xuoA1W4gt{xZt7jEboK7j=}px$CMw(iKFoBjgylK*lGDU zXH{z!RG1u~SP+HJG_(@?VXs&<5DX@80v8|#o?*U+oO6AN7=jBOkNH>f*;trOl}mhb z>JM!;*>;y8vzIk(zq&~fN-?#ko%V45{7(kHeO~WVs5*Nk02=-(2 z!a0LhY1U$eGpAq$c1B}o;ppLXV)2M+&OB6+%l6YdPFrfzOZK7q$&|1Zh}zJb{?;xCKEE zXk9Dc=!{_`EhLFei36^ypN%i3e_qglkLwY?JgN^NtR4eZLpNPg&}@BuoP2XzG$9b> z0}z#eLuJ^5<7EcK5c|WWc>HB*`auK&@>5jrO2puRqHlSFiMBW-@NJ>_hIW+z3?``%|^HM4uMm`{GBw}rQ zmH=||$EHy-bFTH_X}cU9qtqcy0X_3NXm%kk?DOI#k`K9;uBJitQ|AHVkHl1r{3hTe zS&ZUmfhj!-gb@`|EAe>Ffyr~{!Jk@Um#0U2-$F_Q0EP-f7=>jC*#C3_K$*i&-nA?e zNO2)jH-IhgGduZAFB7n-BF5~C~D0k$eJjmzW z0NbC4VUtrXqBRhRG`3r4X;|_$F?EgORRU!^1$9%KqwP;7PJ8oKYPcwPSXB9DN8qbR-@hBcnE&xdN0eR%XQdZU#Jq1k85b9Gb^XO}S zAR?lABwD>d!9iCz%iUT})2bIcqvf2%(~mt*U||m(O9{c4EL4AfFz21{`%w790Y_-` zV}T9t&~jf1Y#!v;<{sR>abuTPlj?;Sp~z|$_TGxPuF@)EVHkXwfYJS=30-B zfvlylho7#7L!@&l0Z`q2mpBFpjt!yGFXAn&ApV!qvvi_SkaWYTsKSyd2HQa?eV$c3 zR?&}^8jj>&@3Wej@>JcpzxGXA^^0Ei?*=SsN@vP!c2=1573<|XmS;XIzjPNO(p0w) z$>9ly36>8FqJ5PgANd2W&u+>UX**U=&>5kUPuVMB^K-5X@OpRhP)@5HW2hHf60$AL zDUDsyiSwCE_m>7L?S}{h#k?#gok5cqDYq1hak9?z5!P|D$#%yG>8pi79jK z)SK}Q(J_{Ibpja~OVK%5J2eu^T$G*N2*D3@FWvXU`ntkh1eV9+K)I)0;y zRu=ZU4Fp3mkM;=$N&iT9QQpMj15U5mL=<%)bl?L%?gUio`XS-3IFSf*8>AaQ(J`*$?7$u22W#>=TqO_RddREWx)TWv%u{RJ+>>9SU>*v57%6+CWp$#yZd zR3M_U%|&_~7dBLC-tFlMI|jXAg%*Cag?4L!h6)lHPBLhPPH$OIgUdqz`MG9TatoiM zy)pmCk|n*~^IkkNVvRtaTwO=@d76)@q{AI0W*<+#Ssy5WNwmCfL3BT@Iq9veA5o}< z<6wW@PTA!3Q2l0ulKi(g@$Wl7i@L#p?R&`X%ou3;a+C56j2pJ(E4e{#r@Pth^9XBZ z6{xUpQ(2ocQJ6eCF)0xevDFm(xJa$7llvq7{T_zRNQ>-kfQqD}Ii8R5op6hzG{ICk zrAqZkR3U1I?9V1SjdB&Dogziv$pTY>hOJ7$;PzUrJ_&o@O|DzdgOz48!75@E{=dm~ zt$lauGcsR=tE?4jvGT=Ve8#YLLGVvrtW!nJ*dDZ~NzL}`jzzg-lC+=x%~-D_is-|d)QY}} zW=Un&Vc<+`m>m^mQp7C=>SVu(Dl`k5;(d74?_V*!{=&x*Nl}y4 zs}zG`+ikG??bT)=#t^r%j$uUmd ziS}A}LB{B~jcRan?!HZ8j@K4}4SVzAhdvpugj>bgXKWBhjFLKtS#7o<`DEbNV z9`3+PH0I#}PN@vMtWH=JZf$4(m*+@YoV#bXqx{ThdpOxDjurNTOBAHLw-OR>E#qyH zBW-ope;S@=ph1X2KDtYwW;Vh}QL~d%#d{7fj@<2TG5M_AVkgV=@NMMbW$aWFzgX{B z&7$k?w|3dY3JZqE@p~IVxP}zkWCA^B;`&x0+{vzz=2cj4t`(MB=e8G5o>?U>6Thp|lH9A(Tv;;n5(o>OSyPnD1b+0Dd1k1lMYEk9z z*^VHZMcr{QdB65mOCQ_xee$olg8-)!!YlaSpF{cnSUe=@t2Z5n_%EDeU5H84+ZuSg za6WV9mHznpSA^EF4z+6M8NP{b>n`v43d}Htl1k?2vRVa~cl&NtY9!ESRn*Ra(M5w$ z;?lPUXEXQG5g!kJp4!XB^hlLUyA3*lKf*i?NZm`UH$fRq=C!Js4hoy=yvl% z1`XXM&c#xsJSc+dafR9Qx?25SQZbvWZ5^x3svpypB>d zfz2yIl4`yF5?!N4qAss8gn$&N3ux~Qa|YU~bnaa$VcwAYA03U3^It+AcubqL(n;df zY7hitv`9!ZyII`Fzv^+z=4f`4Mvu!#s+xN2PO3zD*HKWdZ2vkqiyL}}w7*m|RGRlm z&lG7;9FZ`PB|lm{nI`q^$-{psk-zPpj*=jYbI)cU5moZP#UgVgPg$XS8+x;~la&T69! 
z5HvA=X;VSkPY7mL{Xcs0tgJh^ugq=_^AJT_lOM<>zsYg5^Kd52nqQO)zLRQNe@z1N zrueEbzw`1>;Pbr?*DCs|YB+`S$ohC>K1z;j zbyU_eTb}iQEj0Ddx&6PfRYZ>x#RHrJgs|UkZf7sYT|ytIptoS+BppD$en%uDXh^W&cyZL-rPV4++lqd4srFQVSB9j z5&TAQe|Ih<2_-tPY0MJCEjv^(_o)u%T)K`m|)2W z9-4u$p!k-PPbyI*3R{{_2;xWo>zP$PVLlyb2A@GOV^A0gYz-*J<}cQzxmF>JaMWuw z9)okb-=GwkZSOjh^xyylKP@oH7Kj_){vT@hCY(0`J1{{9lLDV%CZbtE>>Pcke}dR9 zURah(C}9{u`T&>-Se5}}>S5sJO|+Z(QoM^DOml0Fx(~3#8i@54U?~Ts>|v5Q3;wxE zr1mlqf4~*N9&k+m81WLAKrkiiXalA>mnoqpgd=gU`>rd1@N3MhO^w^oDBt04kLCPT z*|YhpWst>_-`3r&_k8Tug{J$;U(x?Xbpmk}_PS{9{C}hAEZd^&!ZkcYD=kWwfP{2+ ziGb1}-QC?a42^VmcSv^*NJ@7%N_RK=dG|i{AAm1BthMg@yv~a<*ifg|PHZv65ls?x z<8lAUyVb;8pwPZQKQWEneW6N>!-re=EMRyD=b#;O9dA>XySvu)t_@F|cZI;K;b#$Q>%df&^)p@-3HkQ8PFhPK85yd{mBG>LS^rR3 zy}wE!XPerCoe;ZUC0Pu4@yl~-rufY1q_gLqKh?oPiLo9P5^KuS*{()CJw4XIi%ljw zfiD`)nP8g_gP`!r;K~J&)<^(*_M$8*cU!=>&CtV)eho8~W4v)Sw8od(GN{l!eRQzv zlrIa`Z27zIO*is+k1&DyLK4;UmFI?<7{0ltaU$pgyXL@D0wPei^!xnQ{NMhcLVC@a z>c`aau1CsgOy;i1pH`skIS!O+!aJXLt~{~b_jl0G^%yy3to^=iY{|IR%=bQA*6_nI zHJxI?r$s*Ex+LpLy*%OU2CJa;d3pK51blXUG!?OQYDG$g<@Pw6J+NFK#Jt{3)**+oVGa(WVgsjW>^T*>S3P}9C^gbXFpPphcu^r4Qa)Sb6+qI8wRADm%_=NgFe z&}mHo@A1})A^Lp)w=MvjgSrjyR{-OJ0wm?=APR)6pWk$c0Gfh0$m_EkCvm49AnLYU zqXw2oxJ3avq02y3d9fwMek{f#Xmb_I#5L7Y0tC#NlCNjQnrqq}k?L5eBYp0%D(XgI|L? zK1GJiQlCC8L_(jF_iGNr&06sM-ihbPhtRf~ep`gZ27iQaOY{8|Xrv7^3Kqb3ZbtBC z2rxD`9$yDYYqt69VB+~G2NOjzE3*mm+1hUaN&|FiMY%xrAa5w)X*s~SgVhCNEt zfzI!t{~uO5vFwkSnpi*Mxqv?daR z9`zkF9`f#Ecd(0lr>EBm!!dIHirt!yL7AoaBKF4SwPLKcw8oU8XyIqT8ovt*b2Bs6 ziR@l2jirT!7~{t*iH1#>Muz&G<#S4XcPE9lej{g`@`qcI@35gy(>{?F^TZ+0L~ z@^&OLL8n|G;^;|AM>^=CUyDtyd%Z@2;koPDSCVC~tvax)JYKiBQISz;uX0R+(YXUQ z>1EaIEO!1XCkRt~$0^eY%T7LJ%xO|3UK93n_EqT2Rg7D${F&j?rZmhqH{d?--+q-x zGKZ=Qx7Gld)G%tcIJUeaBqAEuUt~doHcbKmewT|&-H%7$)^lI;kR{ZGCwpgT>CHPj zIH=!sYxQt79sR)$m;n*=9!n*DE}Q`Hy0L#0jb%5%=tQg&d}%K)UPAEQ@}*qu5yZS{ zKqz>3*z%g2z8cSs|4>c)uElcpa~hBj_7!bq9~X#cYTkfLAu z0;(Jp_O1nB|Hup132FkCz4D#_9RFTD6g;Soj&ui<{TnHwN@fUg*9+4`IlXoXG3t>3+yO)+*8SO6ijY zs;9Ct;IE#T{TL2#IrH|O8gCd}4|$3&xj6IRXt8XqkRje7ZJ$5*OXh%?rFOp32P8Pz zZ{049VB5ocnG6u(~sffh(H>hc6E+_sZDqtb>7a@9!Q z#Jp(>%0d2w`76-%ZtrW%CoaQZv#N*q3kLrtZK^M{#``e$J_t|jeBDbH>P1V1O`!X* z%~@zS+r%&~_Tp1s`nyhsyVk&at*Y_Pd0cY=_N`Pt;i!tvLEanBUww8FlgrmFE^)|Y z0(_km8{p1?d2g>ZQb}xcB^rO*7M6B~>NJ%RJs~O?vvUe_kcX&KGTvm~wWJn z`3ZzMoy*?zSO<@4L5RhSk*T1!`Rz<@4SOiLW4q7dSA8<(vU-DCu;w^HPI*i4<&z7V zcjdag#{CYn<9wzr+eC%+uX^cHol=U-6`r$Vi9^1dL&UB<;5peLTtTZ~A~a@!C;2BM z*8CI0)J=DypA091rfH$97v_YC;JaDV z#*;1#r+CM-1$NC>Vl{)1?~lRh>xb~^{nKws{ce*~1`sRWl^9is_`-)gbl^MLULxo! 
zjEK^4`?P#cZZ!Qcd4m-W`FWD|bD)1--+j4FH)G!-*i_}{milfsW!qKrxy$UevYq0= zOf84hv*^Fq+MwZmyRhruXB5i>XJqd(S@6tU%C#7Os0wvmKZ5^e-EanM%tDI3GNR;|=|KepslgadYxl zi(Z1>28Q#u)Kz+!2$ouXFS|2=h5PmFFl!^M5sa$w@UC-WFvz6AYCiARF9yW4kJ21` zOC@iXb3}cZQmd2MwJ%!);O8eyC-4w9j>%v5Kq*KHJYk1j2Dz*Zd5AI9wyU1n$=tEg zls}R_fp`GlA7+>S405F&3_KqunA1;*`UAx8;`TTB?FWRrp4D|Up5I6b!d>}~wEWPA z--Ew`Y>mCP@E`P<5BA~kd$(T)QGyr;ANnX9$iWCE(OiS(GbOQ_R~Vif$wZ=aW4$GQO?WsV4dqif)F{+0@d*i%nC#Ai$_cY_W(}u^cc#b;upxK1s^G#Q@Us;BU zTBhh{dT{UY#LPW%ZsPng@pXW>gb)oK=_s`7?rh1;BcTs9PLPi+7A8WF(-jvf?mR1< zdUX>yrXLf*SoL^+=+62Ez1&WT-T$3Yq?IW+pjy>38MoP%tC%cjH6!)yQZtSHs>Z;t zosm_uW|RCidy5Phz$2 zkj7-fP5NTLk$q(#V~Pr7$lEeqo8*NyH2uX=t~1$0YrUw(S&EV$S{ngO^InIFc(R(3 z$IGRr)&41pqD+)B9P=d^k7{O1j=$J?^a>7oso(D`RXq~K-*;e(NIIr(=PT|F%^59qXL+@}B;~MeP;q$X{*i`l z2cvC4SZLe(CYMyBbAQ5HJ&daNkef7Na@~)wvU$hXE3pqxq3-`0Z<-h|n=7g}-hR0{ zoD(4AvLX8C7!irak>`O1;`Vau(Br}&JZmm@!Wans~&IcdGq(j{P|pz))LYSNJT445KlCvY|>yM#f$9bUJpH&A49O43v0=J23d z;9^q*5`hMSWL)f3xw7-LL%`a$ZSW8eOz=-)egVp(Tu`}zNaS&r}?Dk+w z6zq>}>)T5$u&SL1hMo_7!p&mD*H%Wzh+=P1f7|z1{7gtAc8_2}DW7)DL%)A8ju+%l@-yc8RW4sz7AI}br6JtDLhD2GOoGsi^RCeq|AMkse@q4iangsu3giXuUnXvXt z_XR0h`(?8>=Hn^2Q*QEdsT~DB`Z%B3%D?~&Z$*pQB~Q!pO)V8K@~h4QJaU*je@WMJ zN{TvS+3`w?w22GcuX4rwR=5>8!3JjLoZ0#JO2%M4$DbZG?A|osoKRUBXciHdwZg{7 zez3Xt4K2tV)UdeN>B5ZPj@UI1oJ0-#Yzk~3aypPM_#If}%N)=T<)Z2mvD8x%kE)Lc zB?o;SL<%*B&ij3Yi$l^xBY(FD1G?6y(`Q+04L)iO;gk2^YgDMaqY?cD$0S9lWE>)R zfi$Bw`~Eq@WgF8Dc8p< z#>x!-^0}|`hQqR^e5qZn(|M16Y=exE{Xibgb;JTcAh;xvgn!nsFBE}J&-G#QZ}yh} zPnCFj1y~dk`NNxlsz*ec`X|~+*#SvGTs;11HOjky>`Q`gv%KB|5CUzhpa0+nUGkWB zKTN2OeOt|RiE+Kiv&~6{iQ;10K?qh{8=Q*p@9+*Gz ze(?%>{lr9Kzgzj0^h}AO=j>)L;ZPxUL$=?7h-^V=MtoQwfVU~N-M5-yzHt#ayPhMoQ%yG1%PD$B4zf&j<6#l~~p`eqM-$oa63 z)me7<&3U}4@swnFows3`=zo`7r*R1)y&F_mY(l6KbQF~DV&Vl&$;O<25EDW zaZd|>MD;aP3{%H1lu<75)<&IL*g^OB)*TzIrj}${e`$Pr{bJj&oxgWJR(ho{(B+2Z z6XA9Q@U$ZP6d-b!r#mh4zOWB(QN(Tz0YP+k^yz6X&`&OVT$3^nOoilus42f}>P4z1 zh>@LL8Qf#kA5jD}`i#UGgekp28Z_NVQOfFGoqK-7oB-sSU)W7$%s`C_e)#|}jG6qd zqz|=SP^`{uTvdb`DrlSzh@xfv5{@aMoRhw(8nw;ZYxzaiO~< ziurqhWL>$IToR==^DkU`2AfHkW(vk791D>~Bd9($m!9hd$m8oOCk>ljGsKc?C;3urpSSZepEOIru@r1t&y zqm~YhfP5($^aR%~{v;7aTv{9El8qJ3X*D+i1pL1MlZNQHko$m2AjIL2^dK0>1Linc zhruqeUp$??#k=mc*3Un{k83f*iBj4x=HOdSc7YOJPLsZBcHDzHE;LAeZlpiHvY?^a z{LyGLb3qkQq7c-3Q8kD6E<9CVu3B`BIdR8HrS`)#iN}2E$Tt`byBfReivD##_hG+m zmz@q1mI=QuEz@KFZXh?^-AZ43)$)VIO1TEwoTl@=iTaX;#4Hs=z{&elT3cQW=|hZP z7N;qBZ`svO_c=#(yw6xyQiIVuUyUeL=6qk4%a1|*8A|2I#IWI^Vh1xS zzOb|hH6g(Zaop!?eRQqYOX0s0fn>+2a<oAdCeUw%6}l2zPRNnexhbmyfHQs-0f(R=!>S^zu@9 zNt_YFZ`XNXd=*O_*}6}H@kI<{9|d@sXKFp)XRb~?t%*=9~Esv1O`FEaTyDM`|Rg#R#pcziHm8d*Udz`hZ7ymKr;<+w^Z^=u$r&TZE^Cw zW%g{en6{t}fNH=6_H)B%$5j86DTYQn8I0xi}CH=j<#)vm6 zl^I{8lNfUQ!tu&oBg9=le?!Kd#2|M9h%JaNs57pw3`t!VM9rkrns8mIBv_)H9&hv# z#n`AB%$MblhtLMHXHu{3vZIH}7{9C1X-fj6+01<*GCvZ`Ccb-rA&L@hMdy*AzRLD} zDSbl3Wc#_>zSi`4@5O31oXo7|^oC%Nu%GN*b1;|% zl?qe;4GE6{j*w`V*7R6p}LQ3*+MD0B8b{Z zoLSvqnjwRn<8Ko;R{);L2oR9vYMTEp$wb|Oy#+JG5ix|)}k68;(%koB}ttWd;a(~gU#vS3V417*qt+vu^ z^Ua@--G#~QnZzWacitrbhmoxrBYi!%ljSz5`)8oc;wy&ig>TE`YD!Bz6ny{oE!NuR zaMIkPL&i`Sqz%dMzV7C5cUc2Z#F#$!Wd}zunX%e>7W+UToosWhYSzsK&g>(+fUQs7 zHlePy%>&zU+J;u|9pt<7wuNboyW0);;N?#4C5eu*!^eT*Y%At-l)mk3AWE96xUAHM zuzA^eO=8R+kLTU}DE+F$TZN4IO*V4wYZmxfSzhV;7EzV`q%69tixru)EBUCQ7ukt} z@`J{N;-Ec<_VD$*4~XL6pz#L0*$VTuq0d2oyqSKpGG8C|{4-4fdiw7;Zhr`4C*|-$ zFAEmErt55cD%vXO81e4y-6XZJP~r2p6RR^gll_dJNS;NKE_@AH9WQJLH&L0LJun3= z(np8Oqs*)QE*vL5919M!Gd@M5Mz^o=?~$gCZ_B)PPxFf9 zrCbPi8a#lhuEG#XPGA5Qn*?}a3su9|c!QsjjmqJ*Yx@ivJ9flqghEbbgc+`Q z?ZBJu$``LDOy$3-+x#qJ|2m^sK^D1W4>6x29)*?y1Lr+$Z6t-45RU3Dq+v`%o%_3T 
[GIT binary patch payload omitted — base85-encoded blob (includes a `literal 68218` section); not human-readable and reproduced here only as a placeholder.]
zP-~kC4N`-<7+`LTJe$y>0CbOT!nM=BcW&?ywzzdvz42wyA{aoQbcQhp=}=w8grgp7 zIa*tFV!Km-0rNlmgr8nk^E?C}5u`JSHtRvVi)G6jz`%bWuz*!vo|{_y3OU^qV!!6I zp6gx@K(?T<$iN?LVl?v<{}&WDK>GU`HWO$#IR4(=b9?*awEm5;!xI2XyIidA-uO7E zC-M~hd=X`><-e(5EEP(3JTj69gBvw4v#41-*Nz)Bbcwg_EK8u8JE^by%pBQ>C3>f_=0 zd>5v8(=T6B>^KfIx~Wv|BEI7!Yw!}tM46BKOxn5He5>Ix3i27|`n`iq9Jd=0YB*Nr zkh3{2G+E(?PJLQQp0E+s4XKEK6k^m7X(bT_Bp<-uHk2;vWQwW8>i{G5L5~ZctMu*5 z&jn9HkHEbvzH`}DTDo(?w;}5%x-#>3PoVFTW^cBjr>b_Gpf&z*{%J>e{tBAe^5I2# z=VdL)Wk=^#hf{{G$HrS8BS^EulO(|KX%sY5e0c>Li2T*Cz!3OmX*V0Fx%$!YR8uZN z!h1zduR)=fYAZEmdOG-mQ#D5qxG2xv1S*2{+=x97D4m8O*Kv zAXvIICz@01w?P&LcYex@*{STk_#nBz6kF_aOz&T5+Cxs6vWbME1L{2koeY-$`3r)dgRNszH$JG!iF8}B1XZTx}!fnehRbJVonIcW>!r_^%JRvo>pHLv>bk5-BM70|KXzQniJpT68X^qRRK5~*C#Y1&sBMX z+btuw;{^M1y3ce0_k-r|cNrw%9TAeY1i`wFPFK#t31<6?tW)z5GObQ^+XJeeno<(c zS!qURo{!ojY6IxstroQlJ%ZP%ZA9TWdEePOEPVlvD5Wa4uLhlxa?dyQo01fgZE#x+ zTja1;g#1JCS#Y%#Qs+wWFfyEf#fj;vVfj%DtTmH9z zqA=CYIyz=)ZwG?pKOOXFjxwYw5wy*yrALqWTZ$^ylSzsE)8&+nqa$o*iQl9Lwc1R5 zlEg$4N9jtRnKo@##O`yCZo6!7bqq1XXy$g_k5aWotpeEC1F_qVausYiPuJe=k*g9G zRx@XR`v9*#8g&7iu0`QN09FV4asu&wsYrOpJw-x|1=KGszO~q zxl0D$91Tb_)*A%ueJ^?I{oLKZTqWT<`&@Zim;Ts>?YQIwb+o{hqp*IsIer}120PcM znDbr*930QUqFS8Gpjv$(-YZl7>A*=$|cH67o`0bicxv!XHHMhZ<~VO z4s`sDOerh#*z&YBqQVgfCza`)a`;+x%M)9eJoRfKhCcpZJLb9RWxYL_aAzY7xxfj2 zR_4pGUUc~KOr~CEo3artbii;??B^JY;JLxe^1aCz=_d}T4{&*B=(X|jl<%|jF;}k8 z3FI+YMJBg0;E)UdL-5M*@>1q}WIXrk>juI%n*S^t(9_{IR>z03-`u??Kh*#>2I z2OTZ0v-;kD09W~aCj(7Bf461rJi81BJLGh1Z}#5)vwpgIADeioq^@t%!N2$H{V2T* z|GYMS;@$4}dn=gQTde*FkAy8;3^%NfPONtXtvL|v^?x{lBSKzXAz_@Q;SMOn2 zbbD~X;?0obS_0UA26f{))OZg$hmN_wwli8e+Sj33ciLv_ZVYRGV}3O^z3do@8omad z_&#=Cy>_H`h;F{1(}Vit)O+tYSnm}19yT^n&L!J`tapq4tcq~Qq`eU#Q|Y>Z>-uJa zo`C>5_VEo0p6;hXz_#PN-_yP1=iTKx=o9iwIO*fU;mQ^Wyy!mT?d-1qT)8~hiJlI0 zu{mm_0vIx}u9JM|zg;@6mt~;dYOl7Z04+S}&=y=UCm-Xxbw5KIIoQVR#D$d2op5pos_{#s5+qgc#=>!b<^^VOVG#=t^gY|ZRnqI#r?V%-wpjx2^l)GcxZsC95IeQ`v}V8ghWpQP3K2 z1T5-!duXC$2SmP|%F}doABA6xTRz^MRE9oeG2OnB42P|J&@H*{@e^yCQ)t-!456R# zzQJXpWdA^8UoKv`W8GrslWysAsu8LCNd@7(TTqmtq{zjO;mh33RtzXVYuKLKyHz9R zHi;ClMO*zAHMI5cxMYu5mLXGL<)so;;1b3c?3uJFezWvmMd9Cxy4C!`vg*b0|dTacy)> zfiwlKU-4puN!YTw%#%io?@q+e6EpKCQsJ0!5eE-%8Ss(#81=Off1%FU?6+isar+et zN<7~&rV%S!+L{92;rjOCpM#CReF($<#rd;=&bH@;sBI!nRkN#hKuzt2N6L_U8;gtc337pc>ai78Zb<41=y zm_6@)3M)>!Am=^T3W}F4mKBl?$L4#|a6`7nHtT7VMq$7yl@SK~ zF>VFIFSnfQciHwbk4TiD!oeYAa`Za8Er4T^1XXWtZ!I% zTKJvW{QAXBOZknks+Fo2TqIQfpsxV}sXDwZ@DGqkFol~!y^-#YH`Y+&t8)grEDkJl z1LeY9XMk!i+be7ZqSk2QUNZ}7WQM(rE%K!)EKgbvu2I_cIvqeBS*oDUAMg|iM>IQM!8s3XFu@S`CHu6sck``2p5pGfOI9WZm zNO-)TX0EWZ>lISN&&aLI_e#*DX}V!WeZuUGr#>lFdW0* zmXv#RT+q+_k|JAO3nFS7dmKNW%29$Z;JVU+^pjE&rh8p%0uR!@kuu29J(zt<3oP91 ze4;p;x)}RWJq5Q-v}G8Boe@-VQKwp!p>;}@z&*m65jD<@AN?v`OJ0W6~j3-IME|n9$*h!v4mf2(|-LhhCvQb-K&5xYXU!xsuzD0xslP<=G>wW&gT+nxrJ%)Jz`{+W;25toL2avI zKH9FlNHFqrGDPh6Ya&ks60VNi@7&yZBy+)4HoCHX3uGe5r2J6HzPiFx4#VlX3ti9d zEFIAL7<2^(}Djimg+V1pQ*Mmhb z$AIy9@dhFfzYCNP2I16yVfEQ5-tk*%c5ce|2MO4XBre3<7=Q!?vfNA6ZCZU6jjYb#f-_t zdzsh$SdAU^&?oHY%c$!z(3v*xQT?mglyYH%4c;ysLf*(OJgzof;@5xdI1gf(vhp~O z#r{|9S0?E{W{auCmvayd{3Nn?YeQ{_X|!-#-ynl{>BvKfaI!(9>(cbldJ!;qS(C)h z))3Q6X3z{kdm;7s^)s5G%e{*ly29Dt?LJK*0KP5QlJ`w%+D=5tJX*2Ti=>vc5-0Z@ zIyaqtz}e@_vnEu3rOtw=%BIciix|NTyrJ$<4G?_P$<*6$Rsh(;56f9ivA&P;;ZAW- z2$^3JH&`;*?R3HXSClbV_N~l5Zqh3z)y8gCmcQ%brRCFs7@As+9IOI|4*uKmUwrPU z&`RNqF+6(X2D%4@1Ge&?ExmL{IY3O4s+KWl1>rL8wOWe2zX(z15t3R%Eey;@D*iuz zGF7k&-zQ6AqXu>_RZ-qoUmJv?+bmi;z+~=N&lkTyE z&jt?#Pdy+Z2bU9+QIT<>M4t5cJyAvMP0X*~9i1aA*+-e&movNyhUZb_RLBkh27yJ| zN|zoI8q!+4uYOb4m-rg8SHrd)e{f$m1Lg0B;vB`u2^dY 
zi6=jA(U34DdI%mOK|W$JJ;QBP+j;5)>hERE^-x+KsTEgJWI+z(BnP0XmTrV}g@c>{ zGTa8J?;SyGTcN8An$oNK8Gad%10p2+Nid#+tmuqAykp!CNr&N=tQ%n^p{pN$(C;^@ z8Vbol1z_QSmfpcm9^eRBf3#wsb}#Yi_Hsr;lssN*1@JD*cqOE=*M(%QzsdmzBT^l$ zHGeW|-qP@=!X)}LI|l@~w0RsjN8uTIniFMZLz2tKJ#CC2VG%|nATsY*jv(eA)Fj+u zP^(^yk2~Z-4^^0F$@H~#xPhy3P)As24!w$n@Q@y*%&NZH*b_h(fW3{{8q?)UD2S?* z^?_BuCdeate?tqW1sTB14f|uYnVNTK(Vkj=zlLHz{`a&aP7(1;QwbaMM5o$ z*;%vrp@?;2?_wp53ENTb7zt7r2b~2=)<{K}I(tE`2arD^I zkCk0>oGQ@OttK$@%_)9m_+rcKp!}hSe5SO?{q4>MY0#F;?7%}zBaK?Nlv;7;FFE~? zR7DV{hw6~WQk929%@0A3T;hC>aLI66fVZ1Q-3k?KD~Fj3$lpMsifEp^A|m zQbpeYpJnN}tdr)=idX^e<*))SKUaYQtNyxp{OP33wQ6vNPBK7Z}8^z zvcj#D2=w~2@C{AREWfem6#;8zyzA;J*_l_lJF9ezPVTo&u|r~7_oU#{Lkn4HaB+=G zHohB}8cSd%vEhZN({{hEg-UeYcWYko^50xt_4)y$K`AkaTfuv?G?;8^W|2PlAplA? zFWL`$M*Gjj{bU0vAA(%4*TxE0;??1&5gja@avP6TY_kO^;{1H*ZiAY_vDRg9A@^@$ zDBWhfzbEwKmKfz`4ORy-(7%~0(wJPy*kdMzpgj3>J8aS|7}=m5eIIB+Ikm)K#b*3Y zm5N)Bx=TT;M`1ydTh>f2#>u_dD^$?`_rr-DH8=Fn4)1)y;}1ZzWccB2%fD(UsNeLG zN@Ap%ysb~lzOMBQW)+=maL`3*_%P*m9~%9na;hy_;Y+Jz?X9<@k%*enZ2UO6O< z8>ykWT5UaK{*eKj3a8T+rC<8@-}C)ugX=0J+29x4Iy?HE7aeGogat(h!jHEm~@?mCXOV@f1Kua zy1v!f;?$+id^?2RlI*XLZ|PNyaG)JJD0j4mdHj)%b?ac6!YAe0=W+NtL(Jc*MiJ>> zlvn{`0jAo5>9g2IMhY^6^Qnn=(0`1Hw^(dXoJy{ECB(|gJB7g9;qtC1b$)@;X?*&A z8L%8nm5uK+ZZU;UQN5|t;;K|va)5eChp-}|}F z+}SrmmI1z!255iI70U_zf!VHdB@b4YmC?C#0(FbfE3AV_2k03X9jDnMz2x^AsW6j0 z6~%~Q6sgx+x#S+~wvJCV)CEc9w5d_2%D{--6^$y_e%DDhkl$Ryp}g(M&k?+7#q*cK zo98pY+mC}aP&{bhB9{KRe)1+y|@!3SSqS9YcR(=j5jcDSY zn)9%11|EoxRQJZTD4DhZ#3RIjvGU=-?AE$dVt z8HlW)Mv^@Y9S^Px`9H>UA%RzPHx6gs#?OV|3=?TJp^+=5`(rkMof!A_2TO;nP~!h$efE)7W{vz;bCARs%Q&LSfpo%`02fDx@al^|##7udyhYDUw1 zZC{U}!h=yP?lB#Z^0fsrtPKVZ$XOVg3Tw1ZsBv|MN{NzNhP2~Rri^bGR z7lQ?qEu(_jD8jpqYg0C;J>OH8(`_tsa&jJxE77!ToDXpda51WvEJ3r90e(wGjw)oN zyjNQdf1hFsE{lbx6jelz8*E|6fv^l_f~h0%ErsRx<-Yw^J7hF(i$0NkiHbvXl{5sE zA0PkdNQij#fC^^?#9!k&T+Xr~j600yr_Z)F2@Uw18=t40HPS?A?M6^b8fS6)3;7#D zC|0V$e^>r?I2lZ0|J3zkfY81xhJjr-UGB95vA%R)wZuf4$hGdfXl|EfMmv_F2|iy~4C5owNUOc(P1U zrV)L8fwuC9N=|4*p8kr4FRQ8YTR`qgJGE?rXoMb*7uGso*|b&*;4?rG}Z2 zS6QydB*AeYN63q!NZ}|9-7VT3p>@Y!Qh{O0V&Fu%YK0oi7>qg+mx=lriT0a%|;=D>PPm8+w?uhl(WnT-&0@57`7-nX{BSpp7$ z`}eRNmIY4;8SW( zQ@WB+Fjsn#i*77nal=^{i<^;=E#eMGwA6LXbjf^n*}%&El#ZK9c1SnqCVY$jtFtO(G*>rc|w>d~6N?HWg2+n*m@{zU<84t9-^H*}_Zz(eHvW2|gW+jL~G zy{M>euWHkN12GSu>`?Yc3U-Xx^bnk9>h~W+!>C|#Kw)ZYjux)qEN+8Qe)JHUcEd*E zvUjFQoOaK<#9SV6$Jp9liVWMufW8oYx@pGVt6YfhmFzyRaGAS}8U^eKsw%$Xt_dVA zx7KWrUs~_RiY_^RpiUK`f%krb`%3nxYK=R;pKcRtT;`Ny@)QPj;MeN^u652OZ*;06G}cFJ~?tK zbq~|5q3zb=uR4Wes+2&l$X^|mLo3nTlKIQTh(=nUBD#Vr2T_r~nF)dzC$%4%K7U9ItI`2BuZ!tv(zIWOd zz}Dl3C!_9yrPu#eun$P_uu?@K z>A6&`<5ym(S;h0Pa51rS{h&07G2>PUhlCWsw!m-c=PTiGP*@o6y)98B$w74=!{rel z#pt#IY=&EYU&&;JnDD0w4Fiv5uK!)OZ3e%{$E`f+rxuN?0l_zGodRwoeb=dPpM;NY zkY#DVAfI$>>|&Z%Dh)3P3nB65J|hq-{lVt3kaGS<&B`yMz=YtUB^-r#smrGL%9!{a zTfam<1&?0BldHklJq6Q(+S6nr-&l~VpK7K*d$KEp@xWP%K!FGOH3njUwHP1rJA?G} z-_C&ea-8*KJc?%P5Qw;8b9z}n7D+mH0##!TG{#uE1Yx(=2aq=Y=mYoqP~93ZY*4wMKFZM>V)~t zRF%LQhYDNE<=7~-lCxEQmS&z9+<{x<2$B1BTOis&!qpLBnBM{d>6AZArdGaK=zm5x zzw!A0AB+3A;MLxkM%;lq?EgQ&;$y_Dcr@0-0avrtnWr}ki8U7^`#~7uI7P@?zQQKu z1}~BrsyS>HhB^o~a(#9VkNzcGByUk_$q4R-#!YO?2#JjeP_MC(7VA#v#77~F@Z<1t za7i0=77kggYa`nnsS(CDTn#9rWd9`e!@32~M;?%L8lZyU84m|6QBM&U2n1n5qudmc zR16}{xQGKM13;?z?_waLH|Tt2H-%B>%20-Ultg*11=B#0KFRw|7@2~R?v|$<9YB|; zn0n>x8c>TTn2Cx-@g62vtttP3ZwYq!Ga@Y6u)-psW`6`OMs0_>6lGNUXBdkXE)7{E z87fmz*{FbAOYuLwE2tT)iwPgx(qc8^5n>iTvI)xxK1N%*k+*B4Xo!gdTJG8KBAFj2 z6NV#t3a$=Vy^}{c#i?StMRw#F`{F)`P)$*{OKC4%b_%kAS1qz~sH%(-q3FkAuXe6I z^U5f9%AiAmr=1u=wekp$R z@Hpr+%+dqW`M0!%6Z(G0G6RA5bQRR#JP_bx@bdX@PfB&Lf~#42Kt4@rjK~CCFu6lb 
zpO7pnS>a5x!QUijS6ZxKU!PXmAeCso*s1Dz{0!CNp=ofQjBDwE7+Y{w10`r9$c&QUVhiW2<9kRKXvFyR)qTQf&e|&& z68XQ*`h0lviiCi@twIBdH4kB94owxYyuv2>rZ^MLM(CrqSfXO4VGX6^(&Fj(c>BgO z4Ja*i;iv-(-;`R0w#*{WkMNVEHqEMiWdNl1{cKPTw`>E1>0X zbg$f&Fc0N(F}C*mV8|=wUiq=3JsNvwJhaC*&X!8+iKPCj`puFLDH3fttoTO(*ijId zVEOttF!K#4(hC1&9$w+gfx|xTOg|SD_PHfmPlEdGC5(wKgU~-<7l>nuZl3tqSXicO zG!R#1a0v4ReK_U93n5Eobu1oE!I||RK(F11tId5MQd){)n>11^j8TT_httNlX zs)A8mr`nJ8n|k4H7)H$krNF$D$xTNnL}9?MB6006k#&fq9MMBVM*vXQ1f34`S;tvj z>eSQYD8~m83tN+ztpDmcp4MWqaI?J3^w2ROKHy%67_CWi{t=1fpP{c(N$P1|f#Fi7 zyuHNMB)1<2Eg+=fUDlCH>a9<>s<7h_`;OaT|CHnR6XV{{L9#!OPKMHMgsxdgS{rYb zQd_gM$4dS-{(6*GFf!w+dm)u~JTN#JLwWF|H}%DvE4fX}b|i06*0Yey>=`nl2d-cu zHVSTTc83~-j4#1ZTtf02O6WUSHwz9e^HNHQjayRfVPIN_;ujeGnSO_*td~-{tl_MSQTd1<~5cUk0hLizZXo3ioh4 zNz9!h$!GI^(<3lv=mw9J-gB5<{WX+}Ddo2*L#nTYW8#*rUCxtDvLrOa^QyyKQ(fv|}4{8XOPY7PCy0Sz&}1c`t1$qKg?BqsW)E2u;gh zq;Ic}d})GLm&_?z1xO-;7zR?S9D&ZMq;>2k+i)()-(GOntY|A@6wkp zR;5K3D$~e=RYx{MTj`7wS5&euO(R?G+mrO(q)*%fIbJwT>-?Z-;7AdxIkM|6Jq<;f zo)Bvtla2U~U9X#8XbQi_7~soE+cwTg7^6c&xT1=B-qo>@c$SpMwBW$CMCYNnM(M~d z36u+Ls%@IJq-)J?VgC5?1P{{9kN*ot7^$HhO2Te_3skqB|1a5QTl$r2Tb23b@cOT( z=$kGSVs}!EkopztpnE@Ya}%ugrJ2IbfLLEPay?DD?qLCs&Ywmy3}YC44#j-_XwE21 z`xX$j5*1;b(7DEPxQU0Kj=#WFVA|4~_4135N+N#)v#JdZtsSj>62HJ}`mbp|_Wd2( zC0Z{1sjtu%(KsSdRlv|AzX&&7x?J)(#QPMs={1AtOCq)$Nmx zJfz@$8}?d)^?sKf`a|ZT9GeMvTWkCJIEZC@7}XLENU(hUNSF>D^p)6y zhQ|@HVN~uz!Q{DVG%ZzM3gm6N6`IB|+aLLgC180XQ#$+L^z_g$AL0^JM*>vlq3{^q zOR{y@I~z)9KV>IS1jUctXswacDMB-38Xg{JA{b~T!51YZ8F`V0&uwk>Fn&&cEJae= zQi93dB@48WbCftzi}HF)u+u5t$&w!HSVk>K&j;T8h)+UBM7MzVmb3I4^d_{)2!Z~sv*I162ixqg)KI-J9UFUuvC@krqj?K{rz?X3rZ2lNpu_a zKeZIj;5nA3>H$r@m=zq-G{k}{ULIl<%$4@csQ|KN56hCIoPewqX~yE=kC4nW-qpCW z;~nn&dpT+vAbBb|1;o{Wq+Ev;kjcgbHl;T&ou zopw3gKvCE!_R2IbB9^os)Hp&^nRf`T(1Ss#zOljyzuSjDf_#}da7c&wRnQ*BE{k1D z`AL9ZWk0Q@a@}PHey8P$o_SnNmk71=B~S{V%H?Wd47BSQw`VS8GuwhA3@>&2RU8jW zmp|*wrQxETO7YQ|*Ny74@G&x2%gx={9G(~HjBt}k2$n;A-rOUy9+fjQRSvPI?I$5| z!Kwb!EO$GWHCSraXyi*`HAv$z|{4x)W`@7P~P+*37l<^p#Obs9yu5D%qDj|!lm=sigydr_=Ec)T zqS)s7(!~VgOM$JMH+*38Xz~=tPTITxQA36#xMMclxGaAr52EiY%fxd8%LwUy#0``W z>Tz^pT>bD%q2C8+>z??p{PQKX6OjoII*#xYZ*AHfwUDuH6Ah2f6V35jJ*DD66sNE% z1_Ahi=`Ohl{G<3Ke(eS;wVyUDkaHyL%=f&d4DtfDIB-ND%I)Ex?w5Ak>5Fiov7T(egMXA=s^w|GJGh$F9O@PYXoLabA7iQSZtPu2Kj%qd{u6i%dsmj0c&!!I3p#k>h!)bdJX zAzWUp%l@v;4r8)_VtUh~0}<5y@;pOyMAITUq;2 zctbA6e=U4J&Mp>Kg*fn>FCgtleOtukh(Zh3BBkcp#R0|psLQ~_#ZD&^r2}WgJc8O~ zM+PGlA~g#I6J!@Ph7P|9ooWNLC@AWFvp?M0HH>8w{o-?32z;%M`71PoihA##^sz3+%opvYLgPuC5znZ{QU^w za2IuI6?1F<)K47@TnskE3}<^S7E{u`NRw1`7R(RWt-rZ*gS|wwt6N zC*qWr=$<-h^RGsOI&54X_Hq-xG=y=EpUKro;s^Y%g95AP+bnpj%M;`tR5H?Dg zcW*3GoFadd>~__JxaQN*M~u;v|3qI0@k`DLMXrPdD3&k;DtasX6YfG_Zf6mLpdU9IJzT0xE*VXu4#v*xQ zZAe5ScpeHJnkNH31S&P=3LG!Bm9-DKwq&C{UZ$i!no{0Ze`8`IqA2^>6kRE4L0fgf zRHb3o^8j%Uy`Vu!LzNH^Ol;{*?*|JrXy+u7wt_6&|rwuYj3uj*4>IJ=;V;xr{!j5MbI8kW(pC+qwj*ONB9g(U#T~(h$xElCKK2tYT&hA>8>&O{nL<3=BZR?;;)$W z?M)q)@x!WxEnu9I)dX%c zmx2zKIVDKKvp;bm&FZaR4p^#(Tn|BS<*rKmdrOF9&RYAQZ3|gDS3#&>HyFWO=Oyi5 zZdSocSMJHL5t#c`kuXyumuBY!EkU_q{tTTTIqk=#cf2UB-1a4bd?u)xUsWv zex$byvA=WOH=0T##3B8WL)@#KD+^B|7XNeRp>f`9?}xLSFAprMaw$I0eO)fO_Xjft z4n%uBi>ET9(k&NL;{y5cyD3{LLY7Q9bxRd{+z}MI&lo0$CE58zx0qz2W%NUuwdH%e zWf_Cz8EvXXidYTds+;d-U?}N}Y1Sm+e583$=V<=fzrv-v7M_E&lR5j9X-m2o9K5Ush2p zWUr)3HPrIc5Ahaq45g3ZOoKpekEIscCcS|P`Kj6G|R#pl@Lx zTu^U&H_ zr*cdqEoMtOQ1!bDS1eiD3&{cON5U_p2nhKdDT7)@wRl?k3^?;>E59Q-{&(Thb38b_ z--ixMSO}c9G~R$ih$;*s2mt9ty6`+Cuq6yUJ-LO{mOwVXqpv4VQf-g&>QiZCN3bdd z_}(tsdL=+hqp=1Sw0ml?3@2yDJtOlyV~%aoNG!b%lB0PAQ1il2OAk7-{~pk1LgLYB zawZd&ITDdW7hWfl5k}? 
z4N+|fEt+E~bmm4Y)vjsoebwAu8ip9Remm+pGn}Ofk+gu6K#U?z2|!bq8bkz)OqHjriiROU9xp%fW^Dqj$lOh|&HE$c?Nh9%DqVq#+2)w) zOAJg*o}5Oc!l1wr<5B2{|D!dOCwMM|wbs&E_iL>P;-&fH*frL+ zs}>l5#}pqg`>d`x0J^aYNF_~I=yLK{jlF+NJB_ilX>H6rXwz4n6E-F1^eHt5Qe=Q% zAg#u+VK?F>#w&jf_mfa@%6yh=%V`|VZ>TEYlH=v1)9`i`aZIbP@m#;Q+?crmw9oUsuj{DwEXGNO|Z6Fl`eM zaO^s^-3|ff!rU}2(rL`qybu~ORmZmHeii*y3k)Y{uzLfdTUvsWLrUZO-vOmvLoiG} z+^atQrxIvaGl$FWkw8v@GG*5k!#HQ-&JOHVo$&PXZ%T>>!+@Dxg^%E}b?SSa=yBR( zBGUhtHg!^O+rd)GSJn&Xb?bW6?4Y58I17MPEN$w^BlB|`qCvb@wY4R%sIeW}ljyEL zNMy}q3|-4MtJ1eAzmR@BqiK<|tETVE?(rat47#N4(k92t=G4pK`=+3o(pp}!-f$xu>0wDEIo&2)(Q3x{;sM)M=799l{3$rlC< zW7OQ(6HC%K=5f+Ek1eCE({N@M)g|UkG}F^l6ZnRjo^!eXD%Ymi)%A{9UqgD2p;Bwp zD9|X-D9|X-%@doAfQ`(o~lBrilyoCp<78pW` zk}(lSot%ZT+3o^jcr4vO&;r`i21^By_PURK^e6gmgaD;}bgT^^U%`^dcGb8pQ$?rB zT#wEnfi>CQH(@>iU7GWf&jUoWd=G?a;!lZ4FN-3T{SECcjZq@+c^(R&h_x4LYFD$4h+BYJT@^Ch z*ORO5mB{w7@0->E!xh3?m=fdm2m~fG(zUg-ksRDiWLcuQ3{G!#ZKS5uhD*L0)If5Yp#7J+FVyBBFsfU0JV1X8W(0o1tT20{eRCM0?a8#V{O^@E_?6d}b})XwnI zphXooFbm$VzNA!Gjct1Y;3rheNT6zY1%r=~VvIS2!%)9v$h8E+5E7V(FO8KVoa|k(6LRgv z2^jpvzC~7D1#ZG?D`cz&MUQHunt1LW?PtPSjQp-0I!nd}>OHOxO<*M;9(Q*R6nSRo zc#HBss55kbu4sE71T}^q;J#aFo_aS<1NL$f3 zXfz19S|K%MjxK1Y-avaXS63Hv{cVIsSyHvO|EszWGn3{Y%^$b(3({|#Tti&{$K-NR z#l3CK-;>rU{8h31xm@iKlO+|17_HFVT9;!Qt{_zl8iAYhaw?7T6zbZCX#g^(`QFK? zSjzcD#5zd_wKf&!Dlah;LWj|;N<=tw(@Ed8jZ4yuaKGB4?Zk1>So%UinZ707MgJ!S z2292GPG$J07$P_f(8DDPeOr63>}RNYTWvV6)*$An&|w50LWps}5Ape?v>alZ94sWX z)#pX{DYOgS$Ke94t*^AH)Ft+e$^?WZO?Zwl`m&O)MRGZhnV%-vS za}FX?=2|T<#IzKzuHVWbmC|(7q9LYCdAn@^Xpu=20GrZq#0#8;=g^|+o-vE10dMjT z!Hia*JjHacYjSR`vU$0dXUtCX)yp(=+TSJxlkXx43d}Gu7ZYQNW;*@prPkavF9*vN z>wvlXdQ=}hS8^MgbMj%VurouaZ5X@8rfn=IO26y_(fFb?BswIxP|j8UbV44wB9fYy z-u*HI71YK&5BkI0QsNoL^ik7BC)c&ad0FVyrVtUM^|U^Al1A{AG~v)NM5|KZx~VCf zI$`u$XTrQB_NIQ&5>kGsRXyyeF_e8r(-+^ZIn6m7*_tAaqSlYR31de&aZ!ur{1useM%KluWoN^9#i{~X5pzI>^B;NmkBL23^6V*lbMAir~Eng z+^KWz0bs`bN>iBTu9P2Sqb23XRhTST2g&R-xALS%P9?x`F^XkPFlV!tn|VOpYHt+? 
zg9f#Og!N=yRr~?Ehh{?k}?Y5CU-?qJ{iI z2p|N6A6~Lgqdyc-YSum{XPA$XlQ7aQ3aZl7Lu8fOM*o!lAN?V<^ie<2FEjL@5F}_K zTFcIxq_mK-&hn7iVD!$sriLUf)NeOIWW;xAT61J?PJHOe4|D2}r5(#|j&g@~Vl=Yl z4>{yvg8jU*7PR#Icx;Km?`3CkN~RYtvI&DS?_c7=fr{BB3c4h0)*Y=p3&z2YJmd&g zUk@KV=u@jC2mN|~dhTwX%+6)#Uamv`mzK0PzhbTaZpde$Lq_&@AyjHjIqI{VOU+4k z>T4z@y_93sPyRV+DUm;sxuY@YnR@;#C3DO|w=@Zr2fU~e;3!w$L^4I;K&|w|tdsT= z+tGKa@krRk&v1;Z{YAc%p=S^s-ek!2cY1?H%2=#v>T|7a-;mz5lxz1j3N#8VLIH{_ ztwLkdFk-4Y07<)xCwRh8VXkN_q*8&YO7N9^a&JsJ$f7v4qgCj497Do1YAWjoq&sV2 z?~4%X8al1^VO+yp+mMX{jRG$K1;mJ>Gg+r~{2iEq(600w4PeL&gu`B8sHKT2-b-{8 zLPFYu`~wIJMPQNk9D(Fv>^U5`{Jlw#-1Ns+4cxow3vcsp6lfIKO$tzY)&;;zDe$3Z z(!$c%kC^?EfW?a_BMrz+m*%qVqmwcm1l0Id`4q1D2{wUo)fCU;3Bm0JjB=z^w5dnL zun?exm&%V3(yn8h@!mn-P=Yg9^>KdPb*S9c-u7_J@GkU|g?A|x`gJ`z zH)Uk-Ut(wpMZ8NF!a{RWNONro<87}AVL`tNN0lHbBZC0$#r?a<`Mm8pj_Hr*UHVV- zc`{$6v$zB7GLjvWnS3cjfV?r|UU!?OVO;YQpw>fQ|6x5tLSOYw!bNFFJY>}(OUi;X z9zl~Mw_NxblTbV5x{8RoHNfb~-&i$-?dCg-&nt5sM*6FneCA z9M7-*=Y&yidww{3apdYb*C+GX=Im0`&n-KJ>$mVM58iy!nd-seK|hda8J*CCI7d^} zoM^MTD{>8?FhRLs&Peb#-gv#wRV6_f^GvDT*`vCAQ*-CNjr*I1u?;F#_Tf&mTzHD` z*D?&&QfNUVd8Hf3nmHoCriW}7hG5f3BKM5mpl>o_$2qx_ZAAs87*V+{g zND)tDe=YMf#dSqIjjt)bE0y5KZEZ9$eHX+z?^6u7#XqrTlFE@(Y6pH|SDwdInlyni zm10|LuRKwd?8J{Dw03zpqBy4^Aooqlxcl@WSx9WEOP+*7(pE4#QAazD8?(H63Xcp< zyGmsH7D>IeuA*Szl(;LWcP&yr0#ch?Wj5Qf1a_o+PAy&{yy+vR^w4LZ>f@>Wx@k_< z;W(!fAm{>HS#GlfoV4mU)qkP&G=VX8r*FBPBgd6fCnpqi2Ch8(Suil1n~t(citmO} zQ7GYHDMT*Hq>JX_j_h3DJ1#Mvn#tM4}nEZ7!uztT}=ey=Z+Kn=8 z_H_~*I>mJ9WtL$Q|6#t;d=b=I3xOTiwGPdlgnwumajpr$o}7^gM+pmQRCf<{8b2V*q0h*UI25A?mh}VjZVotlsW~Ym2RTWk% z@QtpN);tC5rKEi+1XB)IIZ3^0Sq7%X@||vGvFxx)_~-Q=%q&B=CuydsQ$d#4xCm_T zq`r=(7{W>YWRh+56iA|7Oo@#Tq0Aej`wx!}Eu7RcVcPssv{G_Xdm$rF(VS9@yLrjJ z-vwaQHeZa^SEImnQ2-!Zney9BQ-=PY5_}Ogf}!n&Z(IA)J8rUd=%OguWOY3GXni)YSB*zcRXJ z=@c2%kR}C&m@9uTm6`+Wf*<F=JNcG#@B^h|}y=W@X$<~{*lM9Eh8m#4e8 zFkyX8Bvi~eJ7%5|YJdRa;Z?Nw+-E=2ccFHXJ`p$=pU1o3^UYQlN7YczFD&*qW^S=f zl}SpPff%<}f!hH7S`Qhuw8Z%|P7xr|iuYCSGT4mDcSBJ=Xagz@>xG5vq|XHa%{2_i z4`hIvUf7%-TQ4qkpQ-xhPG8e@JPg%@4?<7O^4`n$`+gV)bao7j)mn$5o2EXDt7z}c zI^zbsNKvkQR{{a~9F0#jCB<}{rz6Z&MNf|4TzxL4^`@<8FJVFPqCRzX5%wEPz3N_R zcV*HzeeN|9a#>gex`ah-xT5Aq%?WCCZ^EdIc}|{IMHT+6-&l~Tr1O}*)Fu{ADkF2o z_EFOgr)~@(r6OKUe`D|Q^y0a0wFFcQ6(;ClY+n)NCyfOxM4!&XOiE}CWBQB-#+1}B zf%D>ZSxt*_9M;A(Ixg6h1!DbEMw$VEZh;wa=8WE`q-uZ$;PII#jQ*mzyR&Sqo=ue`DI=6MzY0 zGE#1-noDLPQk!!>47#R2G^{cbYMq}=KZ|SKrG~Oeko8(f&RsPmfcOkLN(-8MO<$Fb z*b9a1qbMhV=Ha$~A(?ZboOaAcygA6+tWJvu${|EF9=c1klXf@*U5T+AeH zRy2`V=ejSnlC=!bxEce&fUw&XdWvam4d5nomFAGOm^B7loRsH2n5FJ_|B_5l4ggH$ zMN#w1F~x+-;zbvQkGg5k1d;k10Px1nc%@L|RK``8n~w&B z&lEZi&Aqlyuru7x3At7SikJ$D{HV{Xz*>koY2KcpHG@c_S!W83$Fz;U0iCFa>Qzrc zN_%CxTzCnGwU1#8+RaG$_n5hH%TmnuUF?s&NP%{1EO35!~L)Lt+^Ie`zkc z5}p&4TuHfhcoY&|f-{#@m9XK2=q8oy<4_;HR-p}qk0YDIq7Nntik0?}*2cIJX7qPc zlJZGYgUO<4xe(5bx8Gp$%NP?*zMCXuO7n`^dY&Tt&P_z-TXLIoHt%fObTDT6j_oj2 z#n05AGUg6lz6P7C%w2QQ*1Yub5K-GFK+19*wJ&Te%c?%EDhems)m_WU=w?MPEp>?z zN3#zY2M=S-wYsMWUT_|)Mg7Y(bFiQCS4kk1e#y($Rwf;c+8)B_vOePXF)~cr5K&+; z0?Xew<&iTXifKyo81k3sGrjA+R->8o&nzUT%&Tl$B``Q;W8XeIvGms_1f1CuHDxb3 zW(`iMl+{!zSDoO=)XF_>+Ays|42+Vfal&pYQmtM~jiC#hxu0yGrsDE2ZnTin+3hcz z5mG1CsO_K?N$?D%%J4a5QyUtS=8OG)j=g)%1Dtvy-8|L^o^56qA3Z^f9F^%&*5e1ZFrzv)D8y!7ng;=UW2|chmi`t%RC>-0VeesfkW> z#)-n&d5yh;ckk}6O+$ycs8OT{GP9y&e=dzlNrFbg8mFvtX#G`4Z;Jn%%V(6MmJ%b# zr&IYOO!EM1v2qqCLBMx?*lnI=#BsP!BjYw$L}WB*5&90nM3^l7qU}U1hM47DJM|tP zKk#lbNvSRP5Pcr;CDFWvPlmO=`pQSAycex)qd=p;00qEJ=qoWkF-^Uzu{^838ynWE z5Yv(=J3|gw2{GZZ$Z;XjH-*hcAI6;U)m_~f)fxt&sQrV;@xBra4!X;!yWJm=fq7^H zIE`1D#47p$M>x5TF!iVpghbESrD=CADZm}oFjdN}ojjJ2Gqqk!Nu 
zG5)ET2~0bf13BHhU!zjHxCzfibF2!hO_Kjn4lPa0R~9KVVkAavOj<(Yc{y?y`6x_iqsXiWa9li#=AGFu+ zZ8Na3bC2z0rXigL%ouaZl1cM04=F-P&e(`AVL!usb@FGA#FQI^mDa3#8~5Ox289lG zg1+@Vj%Jy!{N1&+M9|$76Ih%x=CBwL0=^=ez`o%d-{nCodY1&?Q`YhC`Tb1i0OlFw-#`DEdXjPJJp1ttIhAOJ~3K~y}m)IZrFP2r`fA+SrV=OSSd9m|oXF}kXDPNn&b z_oUQlBqU;L5d1%L!FyE#!^2K3GDzl_l~h&2K}9vClp&=PyOuz^a9{Fy06I)qk{IR1 z_fzJIw&FgXZ03py4(W?p7@{XwgAxEaWn2OT7*i2!gxXH&hqfTBDP2mmSQ-5UQXHm< zS+5)u{}arlOg>jL$8Fvd)1DCCIs~562~$4_D3)g6Htr3`+-=TrOke$6Oqhv*$J8HV z?yqelW(t!7xg7<^il3yAP_!Ut!WI<^LVFt_qP1(#eOg1*Ebv{F+QwQ$$*2XW23>8DJGLHMhf zgG^QgCzRtx^fuba6jbjR8q8?}+8J|HL~_S|ne*H*#yNAn=w8hWq_~Gco6uMij2Gtu z#-^{VC-7wm3+<82X4>(*RkmjcT~$BVEi4cUn3&>x9TO4Kool{?xW-~ib58m(ycxQd zcnt^WhQxz$>^Xa?N?@#~;m~HYd_ra)>$At^R3CFDwrls6j69}JuVJ>UlXybOJ0Nj@ zY3rd&vvBPq<5WbOT0oiGt8=87o)-eC7IT0(4m7w<+n6}wGX!|2ai|xr@5^yz-qf5( z<-lofSA<4%zLNeUe#V+NMe*!{ImP#n_Cby`0tBZ6U<3s&^$yB^f#7w9sk zN01=yQP#qvh~5FAJ!MR_EfLd%G1vMq2yj5yaS|*Uk84kvLB;eRWsWEblW2zGgjt)S zV=8?vYp=Nrt&O0=xp{8A8KoWt#4c$YJPSvm>%A3-$;Qewh#Dr*iBY98@rJ_BlEM0&@(SVMWzYM&{naA+|k2rxkRZ6C()-DbPep5%qIb zC$uSZZfzR{ZXyLZ1?!zE(bE(Fx^7L4=qGYWk@Ly(KN;rXdA+K_YGqe{)TMgXXc?~V z$a$4RPN(27bb`l;i@8UA9UR`bkWWh#n2RP!n>kk*&a+UiqpVBN7kC3gNiqFQ0fLdL z#1`2Xgu>DLyl#%Ru110Dpa6Jiyi5X9JyG}9H+5)alK2$lL)i1S>EG?!{kLT;69_p@0$CcPW!w_I&2Z3PLR`oNs<^~H?35@t~9B7rxsrrO+MZH+P?y}8AF(ule!5}yYVO0nt8_x6d zk6w-6D(GTBT)KdXI--2l|a zg491LB~*)p459wg1Rxt|a}pxP`@LFT)=H)bG#|skiL2BdQ9vL_rVt-&$C}{Fw4Bn< zQf*6Mn#P&p_gQLQOZDp|Jac^$avN$Jbc}#Lu*f2lIIO(WUdxjB`}hJ zNMJn}2`7DL)T+lk0bI6s%W55454$0I6Liz^mp3nR5@huGGgx>x8+* z7f+v+djb$6rmjuG2hGL>i=DLn@UcO#@bZ(j6~nD#x7A=aE)!uCgD5fZC|73l!rAaV=4xr02UTHOrjk9QIRw7G^lP5eU#a3J(44 znRg|~gVx8~#?XT{F|ukKd7nfVXon^w4P&ZrrCdXmz#w!snxcr9BGtr{)2PC9$*QOc zl26G0RxRl4FHj8%oUV?PR#x2JXOT7vFK(>IEzC&7hD6`Pn1{+3vN4k;MEBtcp5v@` zGhS9-qt-NLUQ4!tjyPgHX`wN)=hf*AEu)Djo`U%vrp0$ej}4jXqJtIzQZi*U8#k}! z^c>jfBl^oQj8FEF9tqdeU*bLeJ^&S2gPV=R38cV+z$u#z!i`uPnT&0>K z&P!+dFVsgB(pyD%`cPVF6gi(#Ovop*)mWsXYJ`KG5Cc^`?G&me=a^D2n?MO+wZ3_B zSZp*CAz*xAS^J?j1MTBUZ)|udMTgX{?k9SS)Z~QPj$Q9IHbm>AQQ&!{07ZwuO$;%1 z7+`c369J{~qNxbyHfE&Kq!%N#V5)>p$G!;lEg3H%u(~Iv2dN2n-t}i%jl)oKdI!XL zP9$UO)QHFeWQs{DhP-;8IiyV`UrS#Dj^-G}13p4Rq6h8e9BR(stsewStXenq;V_PG zqL4cZKhD(tTKh(UMuGDb@cc`vg%Q$oQ$qsg@9&`fix+qfOhtaM%&FU>Q=5?I2M><@ z_7bZ=zN`M!FO!%BU-kmW3qL7b*AgU}rgD0E>iyiZVowMd5)nRc&$mio938!-J3X_| zQa1_hSvcrhObs;jw}qRkw)fNsq@=ARt-%!udp=LIgIU$#B0^gafM_g)5-0q)e|p4u zt~0JVd6d921Lov3A5$KtJKep|Owx9uEgOM3rN7p;QJ_&^m;yCGwMNUUp+J?uP!6Tu z0c;UVE8$^G)#_b8^=OWh(JSDzj=5*$6G)f}!e{xOj?)~XNi3POQrS>r>L!4=hiY}+ zUy({=>_mx_QpL?@d#P`tou)d`F>~RB_jyUE8G1aLw$x5nTnlS>^_7qIQ>WH+#Hq0M z8dI5&r$m2IdwB@?JeGFq4_fJ-RhEEtWP3y)>y)NBNYIzb++tdY61Dq0ngYhsdE4zk ztgBxfyO@5Kq%#TrV#uui*0YYLS|fu$lfox_8e7Q0=8Y_V<(1Y;J@U zKugVqbQy}7zZ0B~`VOJw5ilMllMuJ%g^gL?$xPuPqB|6pTubv^ybmXHEG;@W+UBx# z!hmBwCk_xp_hKS~f!d3*b|HAwFD4|JqPA%|8rr8RO77UhTM>BLuVo=4MI!?0^n;r1 z=Mv24d^elx)4P%u=`~R^%Ge71crAS#10sx*q z2dkl|6fqGIRa%TRhlm9s)K;OlXt`8qP7<{o= z2T@7oI-*I?g+?|LF3rfMuKT99C*&6Y0Q9ZF8cd-DA)%z)0GVCxgNQZg{R*M*5@W{q zMVh>X1vM}03*k2 z6i>L$peI(!6LZrEK!QRHTj0TX)d&vOfWV6(yJg=o`JdJ))iqwFxz5370yJji@R-do z)CPu*mN7>FA!PlT*!+h2(hH-pbi)w zNt-_K_lGrTO>e&W=9KAIx2+NwL`R8<>+hk{dX1j|a9t|Olu*@<&bA+E3sFc&yNP%W zQ9uj^6AURv%8a%MrK!1}0I|WirQM`CCZu~!q*T(Va*VWukZVn=tgr}Cue~iZn+Yp~ zWOmqyww$rW{aH=Gxkxr$7P~rclzWxHhFf zDGv}4@TL>Dv=L{v17@Fy)7Zuu*8G{q>^1h#xH5m0v@3z0Xz+LLT%1iCLGtv}YJ=oO zsQ;_F>NJcDier-Qn02inF(~~Vix^E^jHE2Nq&$Q+=W(L%6akf@pwbWdtX2$C50g1y z>OlY{p))?sxdbCwFIVX9bn>n>(P%9(!C$PCzQ#Zc--g@t2Yd;#beW^M0R%I({S^x4w5LCfBQ> zlRLRHx_#4_F^bCl069fSIo_HNA!sCX_A_Yml;ALpU0$X3v59URh-2Ligu~EUR=hW{ z@T)QrRw2EA_qV>k7ZC1{?ZJA9VS|7ki=GUo? 
zgG@)!;VJ>6Q?lw=eEijqjoQbAn9>*q*85y?!If7&_K{KjwMLBsjRK7VH<1Ff^h@~( zBqDGiQ=-+=hZn_+E+P)6>u7&}oD0wRXF0>H`skh5W>S(l0uUZ3Zz&rtAo9uFnR+(g z;I~kyZJ8|YqWO_O3DZyn{@uNI)IGE;qxI+Ro49S-oErri1zto7R0|9_opJn%aQGHn zLom&(){mjSoSB=wJlX4$1KdB)f(V?4{16E%kc3t*aB}GF-tA=NBf;5P)XjtvtyG)? zQSPQF8$dNfmpji(OkGfqjr+3N#yK)?RekJ?SW`{x7Xc#!LMHB(` z#DMgY#o7_-*ZmVY9~9s>X*;wdr3JnQYC6bw;ijwXFds{N-_XIao)RvuqM6co(<73i zZZ(XiMBD-HEZ)a-xm}C(mXVO9M*AOUL(cs^%AENxY_Al;z^cR%a}g< z14b^~(--8)XxcG-Y-*#vSZA!8Kzj}wq~-SFK}9@2s9uk@k$*224Jw~UVhkx@AUcH0 zxl#8)?Rgd{P-NY>8ifS{V90T+y4pC%HAg+;p@=HAi5XpqhAP8P!PGDx(z?%i4%cyS zN~@F3<=c+Zx@bL7Sd{PK96D493=O!7@UfF+&ducL;LPw8M?dyT4BW5=xi+}^J~>Oc z3tigKI=w{PgW5*G8w{P_Xc{qENTHfy_R$A1W{zd#jTk6ltZwegrD@0Xb?(K@LKY!c z)5j9`#29duq)gE#=c7OXQo?lfoHoUos}24mk2$}hbOuxD6Kg})hLJ*#7zUa&=5?iB zv;uq6V+sah`h|GbrTeM67n%Jh?o8k*k;kJiu4`YQ*n3}_A zRo`NDQdgcKPPH-AmNKg{g#l!Xi+1gV8r=CrobFF4*(aOK0(i_BA`W#G6v~2es%C&N zj3J{ARNRmvWXeNbqOBO!5QC4PKF5{hbkaj{=Fv<@mBC#R8q7C_U;4qQGJKSEmEfx( z4UMo&F-H)}=TOAbDl$loK8A7)Y0f=U58$s71rPJUT+)YSn`RKNHFFf@KEpg|%e?y% zqaJE1+|W8+h;^x*(vFaNWN6Q7ozTBla4+#LOzqRP!TdAz-rDvE+9)LA{;KV_%7-D- zj<5-0ZfB>-3XYRcz>d@;+j^1ZaOJ-FKg%b7gelS`+@!(lMv9>t{n_$kf5?PKQt@Xo>nC9 zY42vWfNh{2#dYqFsbf3iwttZ}@RO;1O=%yKG5UR+1B#@KQ~LD1GW78yu=FNM>szFYY`lAl5U=d+kfeeBxf zS{%HV6Ww{=nTbwK4MMu|XiC#KQ|LB!{@Z8=cx&jy9mApVC0%qf$BZ@P>)6IzYx{8e z+rDy!tjq;6H(uhi*)$f}llPFY);Mu4ydR||;*o^eq~+0l#qY}9g$?|pOh9Z2`8U-N&R}5iS*2}YXe~rD=2y(I?kEI*!&;($NFyFEt z7zgcPmL)^c%TAVJ&*WpX%o#U&iJzM zh~vWaOJB!eijJIMl733FUFz3p>x53CJtr&{}Ug7Wz<|^&Fq82`(9InR3n7y!B}F6)i?)IqsQOWhtSKP zGNrDHDrsV4k?7y{{tvnnURN|7A<)pWb|D;g6v11gMuA3wMu7?nOgFseL1Iy%$4`^# zCstHb<-M^N1(3vePmfcY-jr}~l{k>TIm!EJE2rd8`$vxq#Fx`~D-b0`Zsm-WS)fs& zughgt$Ig%Vd2IFNsd(4?4`bKW+BFI^3N#8#p+J?u;KZ&7sj^6s2A8@F;~z7bu{oW_ z^tD8r7~LZ^4?hUbe-~U6QlTs{$JS?~K%+pTz|s_`5*X>UZ-q_z8Wjdy3t%|HSC^jr zRrQb0L)pS*r{`)YKfLe3if;!lz7`5z~iHjT#zDi0f$Qrl^dM7{U!m zo7yGk?KWr>Yj{T{HWV7Vjq#@T0nqW8(#F_}|Aq-{y|}fG#WI}+%;dr8vX#t;X6X^n zw%4fX5NT4D0`+js0gOmwOuC;i)}^Czrar&R+V^zHj9u-A0JrW`wtsTmE1Z~$dZlo6 z&}gnw3GEc}Tu&Q~D{}RtXGs z5}nWbKShrK#Z3jW%~{0K_Y!I1OeCZP(YQ)N_&i<_BSGjVA(p$aUOkfoV-kD9{Mbtv z0cJJxqYNk^tseEW0ZUNR|E7BwuLtaTz4l?ei@*2*1%6SOWQ#BfWK&eSnM9mmu7Rh`V)KPMI;c+uq{wOO+ms$ov3vbQtl6bled zrN?!)7Xyx{afD^b{9;J1z9S5`d~Te$q03xNnNVZ(M_QBkch6u8v^{fNnzsHfWOJGC z0ox1Pzt`U3AAMYF%zvL+A}T$dCFgM6efM~QD?A}xwr1uO zYdFsl+(g-NQY41`vUzOl`=OGxv2RYuXd^nXl_y$H;Sv&9mT2j1&2{O1F!k^}wPr`o z59HYb02o1mB~m_CzLPHmAl)1=8=yF88=L$~@%i-NVXx_3ZQL0}di+;fpZ1m3ybZH? 
zmB7GP0sbH+&I5%#YE?I+(JKANdY&Dv8{t_B(HpX~3#VCbuwBwJRLA=~%Gpj*mAPuo z-V6W$3E@dZK~$f3v0OtIV+v_5mVTCwlD2un6g6JCj?(U(nB;oO3Fc0Q)SH+}qog%L zTY|&w|Hbp|%~1JyUFO5mbBzf8+Wit4KHxlQ8fDg9G$TJ)Xa7ZF=2fhB^j2hk&;#wF zcD)Q-CFUYB8z~Yj*#s2( zi-Lu=5?ZA%v}O*M-4p2{8g0YbUWN3g2ud$?a!QZhG?i`qHQ|ky5#c(%r`S;(8|S{9 zrKD(RNbl=-I&I8GfkuI=qCk~~(T6nVGN+VhJ+k6B4f5FpRKY5~rfG>3sT!je-hn((L^)O#T?_=KIKHr zDcdoFNS(zF>CW$5?CEB1gEs3%fkuHwfy*gSB``b#tVKoZH~E~D%Snt=Oc@P?-*;}A zQ_mPK2>?k_sCpNEdB$1m(*%MTyXq;Ze_aw52pchfB_^cyF>O5THAIHQq=1k>nQe7> zU*@~k9FC4&?jAo%rDHa|>F2$C{ql*~~vV&!sjuUf>K+4>aRg9O#{V#=FBkEv}z zNXIyL-%4gnE^A1W1I)j-=j#3mbDBp4x1NXg5Rp;<_iB;jQ`fVt_SC{AN+zQ8YY_VC zFWSPvyC~{r2#JoZJch(ZM6>z>eH>BGwRDdA0GtrN%W?7PfZ+Gi(nan0vCpC0f6%BF zQ4eB0(nND9>7sVg7ePp`BDLarLCS+uiR+q!Ft76XU~cl$yb)c@*i&TWblowEN~d_7 z;MA({h=-g(7AA~lAb~^wq z!B!ADdr^d&n62h643Dlk0V^@=nlWqFp}p2yFEWNk^D842BY3tfYg^I?Oo|S^mw_6` z?uROY0h4vDAlLIIb+`maH1$vrDW1+D0_j802oN})mLk`SfQaO$B7J$!%w-M&7m@Gj z>n-GT>rogaEhkmFQ|feWoT!ngEe{VO4*6sHHJI?zylcbcm@8w|_A~Z@KQHa{9C@w) z)MD)f)~W9h=6bFa6xsxkQ(IoHxgm(xL2Kp=BXkC1>mI6|F=l?QF_cA+2I^tNzSzT2 z_?BbE!rfanxCs}kUP5*=qW&`4#IV(f2dZ6%`k;17oM$x$0R%c41*iO8D>34Vz@WPIeXJ%2bJzS>hyG^X zmu<56eW_8PXPYJykvo-P|)_W8#m`Qk0OZ znmX0rQZ0&gr@4n$1fuPf}5=DH)SZL31~d|Glo=1=7Mh|5(Lwi z9%ZV|rvXx-r|P78+j}$Gj1rzvLn#H^oCo@%-0MPHyGVN{YnYpm z(j|!LsU@K+Uq)vD0f#}Jq1rh8>v|5(gtJEAj3wg?*uPWf1>oe1;fyr%w`B?PTb6m^_S+FNEeN(=dX93!{=1Gkf%xG>e7CBR*C6wenTnXfOASb zZyos5V@d4UY7SiGdLwOPg3BQxG-d78A`!EZGz@nTW{pCF6V((=IYbi0jVq0T6MRZ@ zkkm3RF?bmEkO?XZE(C)LhAQ03(GHp5UUwSE}Pu6#|i}USGU3s2S{htcE zckL@DRYN^?8dcVip7PM2hqnHL<^+NKz4qeeMMg&9X*296qPn34j5R%r#~pd4ZW^r| zH69qS*2DW*vlje5QR@9$K-=Xpv9BMdx#KfCue&vgq1$L@PDLqB`d!Of=D#4xnh>$7#Vhs zfrk7|Xa#W|Q^yt^4h{}7A4^&W<4^62_SCKDO^O5PY&1RH4cbQ&6oMlP z4By~c^TBB@e@ZlwMbO=xebK5f=C{)R+E_741Y(Vz%$xxotMPGjBsg7NAuT9wB z$%&qPEKwssU0)M)=HnRhm7ra?z>*E!?4Y!p6LE1PcTSshqd=p;3rB$}4daEQQd@AN zK%+pTK%+ocXE|) Date: Fri, 15 Jul 2022 18:32:12 +0800 Subject: [PATCH 15/21] modify readme.md Signed-off-by: zhaohu xing <920232796@qq.com> --- flagai_wechat.png | Bin 149096 -> 56710 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/flagai_wechat.png b/flagai_wechat.png index 2788958a756db03326a5cea796f2f5d9b63e7bbf..e9dd1d3062b46ad8edb2b2912eee10b327c59c2e 100644 GIT binary patch literal 56710 zcmb@tc|26@`#65aVC-Wj+bC-!MV7J**^`JODwB|XiL8woyOO0SN(-&pG|5`Y*d=Ap z*cqNewwYoaX8RrWJkR_6{d~WFe1E^!@2+#sHTSvh>%Ojgxwdm-53#2J;X~GT)&L9! 
zPvBNFdm6DL_P=K$Nb8w!7B3C+2%Ctp#x`-VdyyNkG|(QEMvy{4e;L06GvC328_y@~ z|L1w+Po_+9zjLc3)L)nmqLrZXzXunO!~c6KEg5t!`1CODQ|T?znCKNIi<0U zPa?9BBOPpBWG?VyrMggehKw%hsEPeZ@uA%5e&o4nay?3GV#~J^s|ZL}{)+UhUf;hk zy_tWXA=Y94QPha(b@hw&7!C-3lNDh_Noy!@>tY{Z_ZT8O&aD5+<~DEz1Gfzoa^7+i zHJ*-U|DcXn$u``N)JutdbATJ(tgiwTwm@Ooa9nfDk0OIMvAAt5PPo+g@&WT}blZ55 z5K)b8Ld7*F@1I*=%u+A!buEcJxHjE&AT$tZpX8UIQ}xBh`|X7@8&%7s#HY> z<<5*lg4#hKvl1{r2MumE4(41|1MSZkVi{?~?J-a_>sykc9f&pZrS3NO?Td5N$F zCGnA(-Hc5S&vd7_c&dt6fB1oKa4fplMv-XJa znuclPFH>41A{Jlkj2Xv$ZHoZRHp9xnO}qrr?NNd7bmyBlC5UlR)GGZ3HL8T61}aM> zntzS>lQk|0=eG#~oUB#AW*cI?@E006MyZ$A%pfJb9Ne)RK@bO0isvd%m`>=^9BmjH z9up<{-K|7aos9c_dsK4gYtJ&5z_E3|8F}`=IQu8|D?Xx54%n=_5`lem!YbbOD=O#N zDc^mndA{4pxq--~YK9D?LHY}8Z`BV7=SYz|KpdrEkgml z6lxHKUM&%W4#q;J#xG!W$NBen#IYaA&|>+T|c`Ipk>+&j?r8jvBOA`{~Iq(qa2k}CaJ|&3>|3I^Y(~R+;RYQi~cNg zWuhVLPLO(rVD(d2uw~I*_9(LS=GLDYSN>PxpHHf73Ti*OAN`|1*VfTK4yDf#WH1s?nicy;Z#g6A^;fgCflwXCa`|l9 z5XmIDu;aalfkI`}4)@yl!KQ~k7hzQVs1SV!a%Y@{qJ#vY7H5sF>Wq6UOG5dZ9~XfR zbG4DhNm6=C!%SU#{PqF~X8C$$Wxl$9yv~RP_U8L=ZnQ704o42QYN3ywR6ly*IqucP z;=N6RA1Z;4>zP_C{3{(`6`Smq0?hML8cC;aMrTxl`5&c;P+r-PJPHK7F|Z`_$AxAN ze+rd&t|nN@)N-sM#o50`E0?aRwO0F4C(NHsG#^zzGINTHR84pp%&@_u`B@Ps40fXt zv@7Ywi+c|ayhHuU+4t$Ymcv>w;F#Z68zwenaY94fab*)SqaHR#j~?8+{%uMdj@s$r zCOWEuBPgCJ3uc`5xTp-W&oZC+GpvF9!!g$!X+10%+Hg)S@-#Ru#WG`#ySgj2OTG7Qscw zXod{C?JMTT#AxPfjQ`DN<1R8Ur}n_o{^0CjEH2s>Gxlj4E2tbR4EAfnRN>;p=C(3B z%99~Q6KWHR99Qbmye)G9>;!mu@oUm24`s4-*qYdOn<2}hKe!#KcR~xwY@W!!Fr@*| zd_{7DQ^IcLFU+%>$lF1;E%{RYy0KVjIoX~R!*g6i|H*@#Vku1ppmd?KoT@dRE#%;wVe_?Z6gqPik$hDg_hKTw9 zJwgiGa0YV};;Sc=PMt&*+C(*)A5Pw7(ttSnPH50+Yq1-1q9PRD6#UduH1AbTQ7P-a zVV4s{zvUD%9t`1oxZ`kk5$-M(v0DOFU1wRIp)_2AfjrX*@*^Yo=cJkYi(0aCo~byS zUiPQYEpo!p}1$MWgVuh*)#04-4$vLrc_ z$wTWRwrUxiDI;X@9LAQu*G;0NF->zZzZ{tlA^RK;!mz{+tH$QXlb7-QGa>p<;tE{@ zz4Gl%1Mic~T%@e!_7>iNT{9Nt_{Y1!YeThvVTUG=;CEzB*x+GWUL#BVY`q&{~;m-XcS!i;o%7;`pc38uJEtNama&A5b* z&_af|yz{V+l1(x~(SOt!BZqAy{o@;)*95KW-JrH_xp|0tYY&(sMu!vTl)e7Kf_W{W z%V=5b%>Vwvh7(8+nUwvCFcW2XB$n=4?OGm|MO$4zQF&midrI>2ZNN& zv-PDO8e;^(in#wWQLQPKd0;3R60w_Q3bPNYoTQM5(bU>$ruL*j)8mz_ehQ0+YOBv56% z2hPVJpHVFfkt?;;w-0%gG>aNeqf1X(bzezmorIKZSM@+A#e*}r`Nf<=Du9Z9audoR z-0kLoDoL{qp2xXcea*Sv%a3&^C?wY|IyN!R%}Ec%8Hjj}YDZJt)ryagINh9j<~#4$ zS5WZnCg=R6R-GyN-yv-9`8uhMT%t!(;cYD4X0}?NchN@v)V*H|4G$gU0dXuRSMsWU z!O0k%8=TZnkuVJZKeZj=KJXq9#4v~L7AAzsi18i9CcB#@6IPVVO=%)@Pl!~2X&bc( z1No;&$FQs2k^(gXlRK`tn6%=aQ^AnwmB3!Ae ztQk1H6*uB+y}Qa;fuALSYFmnB9K-a~?S)wI*qUoOWA^TMi!rX&e1IyG52(pbS5Jz< zf(we)RIRPrrC|#RMx1rvI+P6cG7e&T>l9eo{@o(((*8~J0cQ(4WkQJPuiF~{w`|{_ z6>t_CQTAG<{sPthLXyx{ScvC#Dm1{jfa%*YT^1>F%QT+;(f5A9@k}GJYS&4KQxPx5 z(n3iuENnGO8xPDY1$upOJ#=T~m_`fC-=6|6;|ec?GDz0d*=}y#LB8ILl8!m)XYj7z zS?|g%TmknM?{St>v@UAgBq3k*{2t(TL~vuB@gp{rt*|bj8X~go-FtP(+Zf0dNxGCI=t(W zX!GgtEvy}oLnH&b=qMk!lvvPTM)5>cK9ZSRPk}eq?4^DhkslbGD6n8J%eu$f{xm>B9k0B zq4%zN=(FFlRJ7twHJ-6;z4&Gg6i=RXVX8ztTaL*C20WBoS#lE~eS=$Kea}%?zQ!N4`xO zQnNX*>n|+3(g@j{-1-L>XP}@U!cJFXN4atC_<$Y56-qPgC){5=qlPC(S?V!A4E5gNauH0%;J&3HU*O((!Y8=`@2l`oo{%;NtZ&EDM! 
zHe)Lnz~MN1D^m(>iupm=EfZ@7PTTtYU8}7DNtSV$jOK>l;}?zOgjNND{XSs2;gW0> zredGJTJ)fm1F?DTvlG9evho*v_lqp@!s4=oYM;5H%d4;EhjuaX2tMaO0!Q|!- zu{r-)1dq?AfF)@qKDc22(DnL{=S&2_&YfuZKaMaUw*3D%&SQlB-}BlX?Abb9i2Fsm zLHh@+37a)r8Xw#AwkNwzo=_wNDI=J#O+q)ZEZz{rS?pvPa#8OnIQ4)|?zPS23(H&# z9oyhW7=*0Y)!1b%e8Y)j(N>Vo;ea*=L$932vYFJbD~1ofIj=@=+E#)qtw6Iwfn>JFDGTDnm+I-mj{@X*@Y*y9rfAdTP!wCC zow~4aw3BIra5)qEV}ju7;8X;|JUiKmHez_Q`|Y-t+9&q{bS)4qy1r^$&Nw5b@I-O2 z6lm`!;QUxu#v-4P$obRHkZm(cKgPaw>GG}tF9jufX_n?UX`GyFpfqghPnw^S5#Alw zP|4972x7s6mV_DiAp$)QcPqMi^szW{q<7V7ulE(h70V(_#y&~0yD2EmZDocS)0p+5 zMw`?v3gu080mbnT=TPiU3|04vxgL=f_fn-ddLk1$hl(2MLB4Q>tFGmZDkL_CyX#GrEXJ`u*8mqS z@!D^=buD!{XDCf;D?LrMbK#07EscJMhFCi9PivF`Tr|xwLQH3KgL6t}CFaU%ZCep$ zHS~@AZ;ovZhBszh{y6Ie+UYI{&gVuSvhFkawtoJhRsu(uzK4(`dtzJ@c&Ix*pdUn= zReG{4Lfm4!!l%X-af5XPkTN5ShC{5PCOm4VWySlQ%#F_pZ|(>-KfUp^d&!;ZcN=nY zp?=tDAfEp+bB5Wv8(sZ$7Wjr52zPym;9xD68)E_Gz>f#U?|!IrcK@_|6s)rGY{93L z*l`lR@EiBD&Jg4i;KK%P*3uD%69Zi7vzl7?u3e;KG;=J{j?C10q{XgRp1t{T$$ z<>$FcT3c@LJk1_tSerA>+Re$s2anIJop|sr=wbeH%m~NThx$jp$Z;=YX@kfSIO%pB zVbVf=!Y8vN#_Rx0w|bYp%FM(Ffz2yMK_*~$X^2>VFZuED+kzqo_cnPst^2prizg^U zZAegn2rbd_nCGBmC;g$Qm8tWx9*1oH1URj%4-ciXe>omxrL5|4lB+N>^AcL4cIPa} zKU_N0I{O0*YL9%8#nQ@vV!=5t)|D zJo%7;k08&5E%m6B53{E#mGQN~BaBn1WV+&S11)Xu)v>v}hz!IK9|5SWz|e+1AB8L3 z&)?T&m$A>Z)I;HbJ94;0N19L!!8w@K?;siw@dU}DIvEc~X|2KaAMG;-PmrQG zs|?C_+8AQCn4fUrGJ-G)NfJ!=_3{bYn>2T^Z!NOz9D8%4t*LuhViM7WkZPmg1?ImR ze`;+Bm%1Hg-hIw@G)3N0g1H}0cXzSftt}gZS;CTlGPoZhQX|FSuXszPG#Ut%41MB!UF)6>U`I6Qy=iTA;=o|0s;8LemNsW?`Hg4QisXYgzQLy{>|3pwES&V2EEmlI_Ke>w8UKEPo%zxccvaDa2NyXtn)46<|o zZAK1uXKLh~O5DDl;ywiM=5ZKs32v~^!_qt{m*E2b7W?~Tbf(hz-USbzPg+LeZIHZd zSq~ea+>jWTFUzELQB;)d4XQdZCoo-(M_4h%bvjM%)Gg!h;mw!_)y{reVW2Ik<&|GUj~6)!8%s78Z0ySW5C{7&y%X?O{(n>iQZF=!kik0S>_q+3S|Mxr0I8dIz@C90 zStLid#-h)TbM!g?j{IDe)&ZM*aJO4=kf6ZL6j`TdfZE26!iL}!jf1DJGj~#9Y1{xK;*hKDDM-IfgG5G(o-W^M6!6Y!d+0{R><3+fiou{jUY#C-^)EX5!h2zDbJ*@X}QYOPXs; z(c_M&xit0NY{MapKOc;3`z=BHvbPi-WOyFQTj~clzRG5zg(qqH^`8-?Q`Xj34Ysz7 zK5;Hj5UohFAqaOwqJDAnM$KU0$; zV<{3pcg?|G;=sx6_VGfb2IytHW}opOlytqC)RmM%8|`TpTN6SwDV1;qCk934or6!^vWraDtoIByEG^ zRnY4^`>~Es!P!f{`nE%B$c!_WT`TR_VLsQg25%U>IOe;F2caQqOQ8%4kBKtlp}5D3 zI#<$^3TF#r-w3I7AMgLijdaG0;=U$_Vva42>aSE&7y=y2Ud-}K;YtqP%8DM+OPk2y zxFNFZ($GidacX7kWL?~-A!yCS2bwf-vFG#)X9+|G-3^+isWd+o8w!2Dez2lqM9I~S z?bS>d+p%E?qqtju$eqnUJZEr9#vinzebgp^F1!<7y0vupU(ho3FRbg?R(ncn|6 zPRpfJaZ(7ePARA3vZPWjTN10}cDhJtiAp57A8NKDZG1CvU97?Sh&Az6b6FDMr?FPWTuAHgtIeV z$!TcNtIko8F&N(rl$3Y;m-uO&VM^v9Ax6{FN%@|^=-n>bDr;>0ntkwkDj$wqJrP^} za0?(1m=w@crLU46P*pbg&GZse8!WJ9(4{N~VW!5Z3AWX&o$;F2@s5gLDPvwEM&N-S z45C+`rV1ZzS~cNeRmRql*du1zMnf|XFJicyh4bP`k@0Eh6D#qTFK4SOPkxd~&|ebr zulIGR%)(Q!?!bi6x;RlP4*PB`IfR_bx_e9>$&>~{Jkl-Ph9KS5vOh0Ey#3?d$g2e# z)AP>P>n#{U=ro)zc5QbALSGXEy;0|#nMI4crttdnV9;xWyN_^PUHfoB$m`vH`@`Wo zR&FYzh^clE_E6CmNcxzwy3`LY$In%d){4m_1;@z`)lwHEgTQ(Km6v(wt6iOy*^%vSL5U_u^Rm zYU)ie9`I|UsRt?v^Km@)lGG-$3h`RsDmRsR09{I_udFwKSi+Ds0~766(nP5zZ?`fx zro(!4C#BZHOm#8UCDQ}q&BrF?JvKJ+Cu|lq1R8Q>0p|2m((0Zg z6&7QG%ZmL+K>hX@pzOBb6d`y^G}gTs>)8D@uIXM@@Css#p(T8W;t>5b()MC($}Qx& z$%lyK`>ye!E6aWL@(8{?9)gl7yu<|8X`n1CGPQ9Rw(0nG-5sS_9G==O<=t#P1slgK zadvs(EhOF9hF=}XhM_(@^d48z9yetxznWO}X*5_XrasE8`O&zUZVxBlVoxsjXYT7c zzy5r{L?@ds+q^F}VcSo^2l>lP7$i^)T`_QWKAnbrdTlT zoQL-Zwhe=cg7s3)(ywU>izkN(R(PC;B^l)bEc_>26rSOT)Wq%=Q=o(ZjGFKwrMU@F zbFy%t{Lm=upt>bE=fulD7_1Q+JrCp<=!zbZQX6^_c5t<=!7@2mL_f|WtfS@qI{Hu= z0N#25yBv_QBg_+*ZU2$L8SAIx~OJ~mArgxoEEN2<;Q z&6OtEngP8Uud+>Bt!fF40~_)a{GQpfTnf7@D)?B*nDixZ$e7ndQJevlEhZ zqmMFns~i(uF3}@br?wq=g7@VA1@F}M^L97%T_aK|7-Q>Hi=}iYp;O4@H z7;sN1SaT7*nSMR95Np#!tuXOlvy}lTw~j#qd_Qby$ryH35-&Q7{Ns#z=D%%vCHscY 
zBe3?2JW=qGd+XDV+ z0|1aCAcCp0fcO}e^t55sN?wi`OxHSKSv&)D+jvH!m)5gaTNH+?^@;E-?!SqX}MGh zkZ<;?^}V0h6nOww96iio@U^+(?zeYdV!V`B-eV1|w{4Gq(W-Npy0QBV-c*hRTrp=) zd1u&ouQu`eVcqJ`1k0(p^hN_YLAFC|@L4zu6!&(4sSR*H@@bXrXI-H(fpn2RpfP@e z9ZJK^E%9l0@wr~C5DlirjoZr0G7{Vxr65pCyf3vAXF?Lexu=DYFy>7C-DpC(T~&1@ zd%y8}`fv_utsH^1&u(H0Bffli1#RAcp4am}!*cBa{S+20P*;nt>+=+6v{;LeC2HAa zWw}KMF$^@J#CHvP-9;}n(k*k$vwl}OVdZ`eIBjC^?3IkgEKyq{X>G6x(ttO}@b9eL zq~G^pMAkild5n3%POHBloJ|DMb;g#MNNuCgZ0kD7=LXQohMBoMn*1rm#=h7-eg~xM z@LrStI6xPjQlXS_J22-3;(*_3zDU4;?#;tSyUfUIklLQV5I$A|jn% zjhIF~#dUlix>bCdM<%M3ikeE{AvthZgOx{voR86!Bcm;wkUG~Jsl@F}+r;{}rXcdY zF?oF^XmQ7^0KqJiEcVRI8^>IJM6FJWF`YaCNUS&|kzrO_#W>_s?NsAL1}A8k$XbYK zfoJ5vD|4<3bFIs=C~T8Nv>uhPQb+&GAP0;>h8gZxtiNcXhFsFr_IxaP3(_q&3jP~l z$~}}tz7|If)7m%LDvupXc2aGa+JbYJhaQa%UqUx$)5PIN(wd7UE#~Q?s^EZoQhS}OjKr`gS$=?r*cbOf0>Mh*^IefkiPa#L#^zSZ`ydZo-2P#Ox zWK2ARbrZiCndBYE8G5sALNj^axb!-Fy)&*o36>Y`O-cr>pWv#OLAF}Rku+u$#>8(I z=V&d^W)Ci<+5h)aUoR{8YU}4HMhShK_MIg{URpOPZvFb^m<0Sw6bm(A-2{?{9C9*+ zac!?7o)T_cTbvd&>bpL{J8Adsen=?K@#tJpaL+fZZjCoG6QT$fQz*%WkH>QsIEG_? z93R`M$ZHyH$xU|Y#3aL^qnKN8UFph^3Nx zD+~0&6dMbbIJd|%{)QEudex=QaHqWdLQvqDOr|l3Aay^r_Ixo@aok;Ac@=7!k$X48(U1PwfR~k5qL1C z(e73iGbtgvxEmh)B<(Lym9NDFo+2`NT8kcMIblFLkIhb-52OSIDdnvQzLdD<@p>R3 xmaY587zgBuqJNsgEbR#Yz7%PW;VwmKW=jR1|IfATjN3Z3d)5E*S(P9Ce*r2sTC4y7 literal 149096 zcmeFZWl&t<_a+$JEfCxZAwaO;G;R$fK!Uplw-BIlcWWeA@Zj1&aBCoFu;7imy9Z}G z|C!kjv-vTz-*&5}>QYp9aqlbVyw7>gbIv9FgAx!2lN|Hevu8N+a?&c#o}qX>dxnsJ zjtc)Kgn73R{tLlL1t|FpGEBJx|A6sXPTT3(Gm__je-JFP#^ASzKa-c1P;*1r&p=Dp zQ=fUvn#p9FIbY<BwuZyk%!hWKmcs*jGvlt;D zS3N_-RM?ggmz1Za!`N~8st9s@*m6Bg_xx?=WC1egZ~k@U_15#Z$cB67Su1~Yb(_eU z9s%)lT7>_6`7Z>BTi0vJWJZ#H+3hjdP%MTr7H72Tjx&0X#Y2eY)eBy#$0^h&ZCQak z&lo?%9|Z-Sy4P(3%aX?e`cH0jl}_0)y{$HwSyrA{_{)xB%xM4)}@>x-m1 z4u`4?z?-Q(1_D9hsw(}pcjuYHv~Y0rboT!7$=QuU&@KtpxEF3 zYYH;D6=4tT>Iqk6%_U(cJG$AgPqg_}s}5eYEVloS(8temxp?z|YZCsqv8`c!SWs$z z7ZM`Fc48%fqL=?IJE8S}ee+_N&FJPHf#VX{S@$MyM=s3_)ls9l zXNRtAp2MegD~dz>s26Av?dyY9p1sfbAu+4H%R0{9(j3*xq+WY+r*RWUvQ*S5+Q9v= zbQl?5x$gXR6?NHO;wOejADB&lxeSHlTGZhs#NxeJDxn<_sZ={KuU66&{i@xn=;aOi z5IRp$U-k6JUjJA{^o!Y;Bjw;(xzrf(a;}v<^&nD2QJ%DA5>`}S%~v_oJ~qxtT(b&E zK6Z?COF7R0uLyz77rnhk7XlJQ^!a zb%dG5M~9J0YJB-Ykh;NJbPaWJ7cnSrU6OiLju2Nvhq~1C<#dHjP)~I#-uC6io=Xth zt z!eU+==_)6bky`B2^$F^i*OsR0aB5X16PZy=*Ig8^Cj)El=NFu6Tj-H z=xGAM+hdu-jD|B)kp?ceuZX-b+3baM^BvdLx|Qf3Top0eUJYyUJ#eE@YKzX-HX>Zz z9+=l>5+R}Lh|ar%@<$$!tJEt>KhVgZ5t@t)DE^3>z+;$7IlfR~(`bvHE@!$x^J+NT zS15~qFI{z?6!*pyv)O<+@r|hS%Y!Kzn!w4m_SZb9TH!Uu?h*U0^LrKY_f?|86r+eH z)LiS!3?3N<*rWFvaGxMZq3RU+>k)R9#xuXoaK3Y`kmgh*Zxo{c33~JhGClsI3q7z_ zq4a-$^#*)jCA&1IdCxO_OP9#n!B>d|PI5!q#Y#OkY&`Ft#s zKor;ven}&LFIutEA@8_8lC>XcZ9ENIZ}v;M&teWk{?N*7>W%m z7(Pue#ZHD020G|J%nWO5P*}@J+>;%l%!I{s)Yoaz98d(}S;hVJlr7rKU_jXjsnP2x z*`9Z4H%QjNG~0&K_ux@|pw7XaqR3aJ{45QMoUHjDb?aGgU3QfQRFcbHt?W%ik_-AS za5BqhlucYWvW!Sgl>Sgf8(jX09DmsU!FPF1r)bG(1w8mxHfFeasdis*U@nB8cZn2n z2~kAqfC@>tQIwwa>ZY~Qi`Nxep`O)13 zs__ih7sCN!Q-KtL{LCMm?2u=NtdiicK(Y9m;_N1+I~2ff_5M3od)!8+ej0^e+mbzK zPoB#td!sYMLKE{q8DICJ3G*V3M~n`&b^axV{5_{hG=?F^q{+ox*n)?OZ+j@7K} zPnu}Xf$Z1Yg33L!kH1!>fMfReCtO-0xKWa(eu;7vNrinry{!+a6Eu6B)dXy`n*>t0 zqB*dgZu|v@?h#ZP{~4~b>TdiWBd8Qf_j*(KsA<@BCGX#uZxUkC@s&` z@ZT` zodDO>)JE1px>@b_5Tc2!6NA|3D^f3;$cs@Z*HtVrGaN{~a96b(Ix(XWLQL#FwbT@g zWrJ#dg*5lClQO`*|FYLOBBV}qzF8T=Y;-t_MS|&QG{5J?Ff7#Vo4!uF*_W$Lk5kB3 zQa#2~K>#)KItvvwzs=4oCE3OE(#5*)jC2H=ljQ=p7LIf-(>Lro(^4I<{T=)>zh8n8 zGWBQ(u)XdLw4kYgC0L2FUZi%T#=ShqshWOOd$_vK=o7Gnt`w4Zp_8XOc*i#Sr{D}&FJ2_N!Ve% ziDh6+`#$N^j0w87WgyOg54#O|G)6DJck3H5nFgji7z1Ts+tR6>=GRs9^U#*78>v<> z(-P^Ndx3$a*q|i^*Tcd?R9jJCYL*%3RAl3(&m`Y+FG@$efGr@9BPWk^-sR=B-0mND 
z96Y56hR_FOyxmmBsJ;%Mz~@p%E76cTI#k06!RtyBO!dUtzkcpPMc4TdQpT757x;gJ zhJVa{kiy*k?Z(k3>xB)i?Y^;GBK4AsB!WYQDB8)JdE?-oXHr#sZw+QAXg&aDKXvMx zeTj@ygrL3R;%#xgO{(l|}xV}&!{CFVcD`dQ2mCyH?6)@S2Oe$2;z6f3XCD|1S zt2>zf9P-mU{05`Vl?f|mg(y&bjEZ#k_f6Jsk~qNIPo4a4uG7RVp*AzzMC5BOJ30in zNbooSlsR7^!s* zqF!;HOow+`c=p(fdE!I01eW}N@2aqK%e5WcGbN@uY{=t+`!FyG&(+TU*@I8>OgU4x zpFjL#rap6xn1t=*9?Jx9>@JA({L5E3aq0}?edX1v7CGs_01p}W2sDSn&ChKZ2tV4k znY|oH`3XsuVT{D1lK<}nkrR%R8uWHrne?|*e{O74e9Q2D%hvd%{C9>(BIVrnLW;r* z*}S)Mm{_V$lEPN&(n(v&^1VRF_MX`_^G}hLHtH49yZa=E=2RkJw%B?%jAqwG__}9$ znEOKV)>w8f?BwXu;!cKY;_glL*CS>akeF3Q*J1+ktsMcS7Xf4UGKAq5P^Im+joW7x zO2Km`w-2y4#?rW=KmB#t~MDS+X+3ej+2-XD7Iq!>} zc70Mz{d#`?|4IbTd_|8Gjr(McKDz%wGx2L|v8xdYX|EUm{lT+$S=Ry?$*_$isUnF{ z7G?q`C2%U}98^HJ*_RIRHVfq+D4*D#gC3TJ&?8m*9*6mabC$|ljM#1k9%%l%|7 z3EU-2y#EX5%gp`<&VNkUBd`$@MJ?_1C$M4YWGSZUr0@M}XzokXE!}&;sPO+A>T6b` z^a`J{bNDB#i%>7*8_D|>(&rTxeX{X*dt23HyShN>zZt~)W?&Xx{iNZjsmYz`r+A#n zzi99E*V2Mr%Z6%Q+Dd$PTvx;Ko6UP+$9Adlw%a~gn%v`n%}<>a)Fs8&|1J1`3?h|u zM4_0)59HX*BVFOxBQ;~Q@g2Jx48Z*H*VvjpWk(-kWAa=+wbXybTbecTu5rL_tQzF_ zuaF<_X0i913-v0}EL=(bf1tf%mMV6fBWmBPo>&j=L?y$<{1rdXH@`D-Qi(izQQfVZ zYQ9^ox$pLN@+Yj(QSGHKf-wG@rEh&jT|dDMo$vmqIY~syeeSqmSBLuS?=hEhpFV!2 zU7L4Jx(g0BTHdSQpH+8-E^JTyJ?q;eGZ2oT5`ED0@U0(9dq@p`S{Rvq{NsgBB7A`E@+9V0zY7LET{n^1LIQ-J# zdL>_q#z)!qP+wlmlj#9t&UOyL>Liu(L;g=!6Sn`%&eT+LNwTnOZkb0bNK>};9m-M& z3dJyo6}zXZx9HZoK88P3{y#rLE*l?_Mfbgx!dO;?{rpYSbDu3@lhW8mv*7pkdwTo3 zY*x&z`eQq}bJl@p|7jY3zt65F_^{&}TCBI?TW0X%*dkh8on!#chyZVUaFDXJSqYsgEg#`~A@e zo(|WEJDlptpGWEyKQ0l)o1d`$%Sr%2yV8=g&2C&jy+~jA>wu%mHJFm6V!ryYLGgTs zkrUffcaI<--y{{N;=$N_p~~N(n8aSdL4DqAH3e!7{|ORu?5(d)Y*{Ga@>tFZ$b)&Y1h(X%kehczalJkv*S%WU5M-cqPZnp@U% z79I7*9U-cWY`}Hvl(nbe&<{C6a=FbqNwiIwJ?YDHxc}(r<*NwlrA11B>7LN+5uU$h z@mGmUqS-LvV(O^R^St+EHD&n1)yMU;CAt}7iNERJcX0PIX)6+W0n;m?KpmB2Mlutz z{|qmG&o|(?T%s4?mq*TWnsws4gg9#Xkm)Be?d3=&^4P8O?eWa`5BENnD-+j>)Vpq5 z)M?;TQ1o(y3t467oWI(=pXDkkZ=9s$m&*)xWz5rCABdi3R%ZA&Ao ze34%G5!`wZCK=#hCI72;AEE3itb2yxtbl07W+gZZe)ICGWFF7y;D3384x*?CjOhwD zt=>2f)6=#PC&T}#82AjM0a=AvP$knBI?r>3y!0b}%Je;2%>QEbX@33FA2cW1TtdRf zn63&`zohIe$34BtXN0ertz2C6vzF81CkBUUL>XQ_X~@qQH6Be6)+@e2eK9?|N2+~X zli;UEV8s{cL*BuJ7LQG+jrTD;eK1MrV&f0ttbr!RQoln}WyiZo?h5rpBa&BliEbxje0sHheoR;W;CKRmrOc!tD-Vi zzt=S+_cRy;AlLELU5*DhDC8+ue%*T7ar8sQ!}C0BI{3?3^qFjBpWZ_Jm$3?o=c54? za!u^hznq?qJ`gF|424M{W-67P2nZn`1LKp4$I~{E#?M}+KnPZRYT%I<7$1%g zA~d79p28DxWCTno!G~$-r(9P*?itDw#BGw4`>E;wpGk=${-2%tOL36>e}wVz{X53A zvomtG0;As-l@E%AsNt%9A^SS?{`(mtTRgmGD5mRGz=1}=62H%gMxl-KDI@D7)8)O! 
zo|1*cnN!qvTAFXXMAi)4VkfC)Qp6NeV9K|3S&MZtiotbpu00v_Ae=6(%l$6B(;fuDjd( z#F4JjWntyDwdyB5A|??3H2|!Zgo9pZb^(6k7ZxfwVgF)=p3e}_UGTZ6RyaI54(K+S zWBRo}Fgg!cW4MEjcF?0pL#OE!gt$--WGa|SCd0xXM3wiY%fJqmUqr7CK37}NlD5nq z1+JtI&3lw53wAywV73eZ4tPIcg3`Yvj8W7&ngrieX`N5x}7g81nDDM?cK36;Ry}pwC4s z5CAd7$blHPa7!}WL=rX=;B!-FAeC8oQ4kPNf&2DxCEjhC)xRh`ELXR4OVmI?{b_4e z%piT+=q7r#hjke806$sE>gwuVXAKibz9B7&v)(AF>g%q& zG{FRLdT(0BgjrYLYBTxM!hK4D{@*-q`zmDRWNqa{NY7M8a;FQtJNuwoK_g3`5a16>~Mh^$O1IY_8XGA@p=|sxH&1RY*-ub@sY+so6WW*(7zJKTmT(G38zhJh)CzPE3^d z_HK1PS`^8$qMXC{8zX{9g6aNwSU)itAy+1{YWDNO^jMXJPx74iG#J^~hQ3i{$Syd6 z>-@IE0%Ye051ESVrHhkZag=gT$!BH%iDc<;?FvI}TnV8mUi-XYC*4a0NZnWoeSGcE zc3(&Q%*NV!ecE(OA%^K&rVRnvzp^Utf zwUt#OQ&1;=7D=#8WdnMEuUNx>Mp@FixtLxC3 z|G@b<`gzm91@6>WWl?#I-cXyBxu{5!Vw_JnlL&chv`aV%}|L)8CvmfaYl7qYtRB?DY3}ZqmEtSKEhEb}#j1W7OW;@M2sv>*`x(Soe(*i{bb?-DE zyTz6n%-?xF$Ei`r2eq^ub~=3a^F?;KR68$eHzhs=Wrt=IyvEPrBJQ4cq_*D-%S+?4 z8*8?X3wX9Cseo8vWI$xo>no4ZwXJbrbGE7ILEbgjW2fu#20x+RVF_ZmH&w3w^Bnhg zjXF}Voi#3Kk{)+o*j`2QZ9n|eM>KL-{Dd8 zA}~iYjtK`QxV&6WY{jdFMzVmRsKBfj7rji$zt0kL3KLRSg8P*y`u95YX74$W**Jkt#>7e;lathuBXgEAP-wY8I{hL0|Blum%BIapxJ?cgd z@$%OGSn|2u;YwNNB-#MSS;h}rY4=pn8GZdrdj=F z)%rDtOUqvJ$^l}4dK@m|wfD(0ZYER)9ih@9$YFtPR<D%*x1-cZKTVTZqRyhS38kL3l!5zf=;S* zcu@JvP;62Gu9-fTI&e<3E6LM)Xiz8+0O)cXlz^-ba6C2{K2A!kBSMs;lhh{qz-C*5yFVo}JP$a$l94^G1hF53PX}0Eys^2O~h#lK#_#p zY!P=oOzAod{Au$o2a_RQO;?$&V;Norcv#LYO?DnoPgzmW%4UIPna=IEn~$KnBF_%g zyW6BuaVcrBh~Io@{`^Oik8XR-;27kksis!EFs=>}Q;W^L;%{rz&Gfk;V?+l0yt>{p z%zn_~l(vUno4fy6aJ{Id;;XSuCnG{T8YO{{^TAx9{UPeZ!% znEWAU8QbBa)Y5WC;6-gvwr%0@XB=b}$2!DLdSxyW`XfBwD=F<3w<-t_r|Y8gzMP%z z9mz|00K+oi33>mo^dJ9~Uq%IZ;B^-XE+T~vR#q;Zia<+wVw8~o>pfq6m+#6^cyr1Gtpg{^xA1_U#g*jGmhz9-ZjiL zOaj9)UWZWtQp9ey85-{|0TVju&XnBdlx3iozOuyZdavsjJxYlW=^)A}mG#&ZeDA5f zj$;(c9wO_w%m-gHA}SA}LyddE97ywml4ag^Crs! 
z;+Wyu)#i=Gyha0IkUq4#d8gmS4Gu8A%E%^*-K^W6v6k1=%KFoS3d|!ttFjd%1b90)gS5FATiKL_l{3r-V+R&D8_>=Fl^`sA@ zGzX!_)?=7Pu>}0Q+O>|vRo83W)ZVY0gt&5wxk;NH-_?k5lLc`1@bCiwpi!{fF{fac z)!AYrp$&Qk!T!XXd&&7>!C6Y(2f<-n#~PIDnfHTwT_JlWy(@Z5zLXCx)-WhXis?SZ zqWQDogVi^^R-G4kmvg4MySTs2TcNg2x(Sgz8TekgWtHXC@Fma9RN${M{7Y%GBt&3% zpUMz5eZfZ-oq^o(wvPJ+yCDyEDQnU+Am?4yor1; z=e-Rx;D%l_s`t9t1#)z_E&LC^@j){+N zs7~;jx$5oah#LWXwjA?_7-cFVWdrDbD+IyyZjOK)4xU!|ua|MG6?aJruu6H6H)Sdq z>4B5v@0(Dn?R)vDUxFU4vyu{7w~DEXtk|}3X+(wJa;_tZp2HiWJA0d$+2?Arg+cob zQPZ~J)7C5a7)}!;4BlooL487ASDe>}u5G2DUA{fF=SZm7V~9tiK$>zC{rxK$=%Up) z?gu=9J;tqcr#hcX%D03QFxXBD6pVNLIYr>-t3$LS32)0zh|cazr}_f2lx42O*B5{zTSVtNmFjVYgUf zHGs%Z*MkwNg_T7kjXQ zv3#2CT1n}{nQDU>>T)L`DYtGfr?Op*;;uvO8#4-PYipI91Fic|gc{h?6pKWQ&e7HV zaVK6aIu@W07O6jhAM|I=LClWi&kN8MIn8xMYo(2e*Fh~z4ZI38JD9D#@fl(a4m?4l zAQk(vE+=@s{*_nPGy1AiM&*~;`*@toBXYPGyW+Td{8Jly{T3nLk@@=&sS9g?#Fo7G z<;>Rl?b0K3ZYrr?+aiN8l=4g1a{|^{0Sx1+^6p4}TZBD{j>CyytSt3r!wQX@H5N8; z>Mwo9*1bpkaD{*-iRNzEgAH90UG(t|Yoq0C_~WW=I5BQC&b0ge1^;@g&4cEmk7wL- zr-4tG`!lE4zkQW@qR0a64Lg){n#JX$DOokjVk>+uY<3&;TncNAyn3nX_pTtVmpX7r zueDbGMP`vsXOZb1a(4r3OKLv6NhHqO+w<%z6uY+)lob&_WD;wXq4wCOYrQ3Ej;Uxl zRbW(@>YuU5Di78OON5hERembG5{L2NVU+B(Ii-bHC$==k* zwbWB_wT4WM8tVBXz(IE)ii{f>kavQH$RKIjTX{i~UtSVdYEVLlqx(Hc(A6kkNV5m9{a=+e4 zHk)LzfE}jt+FU@^)Jv!O+8*yyBb+iaFFS5&wnI0)k2_TU$`gBhm055R#;0!vPNEo& zptp7QOEp%xw(|`i3vJvszgeiIF)Y)&piEWS&b+O?+^RK-A+6+|*3)HDN0K&VI*g{^ z-?@-Tmyv+1^_1(nP?<(;hogqIy`FBw1}ulB_e4iWvzW&af)4CwYvhlPyJzVqhb6ZNj2`*9t1!n-Bs#zu5C~43LZgn-b;v7 z`{PzBgpmI~)Rz4D@Q*=(o7^*D<|%>ah~}CVf}5tSuqxS!Af9llMttgzvE&FY=TcsL zC?=VNcwZ(^&qyN^k`kCdj?YE!qo>O*caKg)%iM0CcR(}RNF9x-K_s1-%x1eQUzXL> zH$>!cw%m*%xw>NHx>A zYQpo+0QNFgt{vTtbQ>e@Wc&*mA@3Y|;CAQXyIW!FmYIC=g{_*|sx-$ij%lzyk=xo$d6$OveNuln9=J4_!jNrv| zc23b|o4B6z#pOC@E`GOWbMw=YaSz_-wQu36-&Z3Z8ygwP?)s&Y!Qgs#bz7JqhL3k#pBOGFwo<@}-H)#|kS`9_Q z6%e;?yWbLuMnX11k6oCM>Qxe0krv#}-qC6AzRTS8Hy_4TvfwWz5G3e?-YUgYIJ8J$ z6;>jwG*mo)T}44!Wz^bZ_&J$nzx@zS#nubb>{XmnU7}vR*ih&@`0hmhxNjk*X*bNf zJz1LU(V)X20bN@IrxmVg5a$`TiQXVMC+lA_UJqKZkE}X0KKDSw-LE)UayqKuy_lLJ z&d9AVC`e29*kyieGxfc{?yC<%0E@JCSG0>?Mzzu?o^tbMs*U<=*ek}cWUEmadS!#X z5*JVpNjOXyV~{f&v&&LA?BHgv0#?j20;U?OM2I8*$*tv&0$*wiT&RFqQI3R=OxZxp0~e-o2*yXh#}j^5+|0O(79~vr+ol+sDw)QH|~g;bpE0f331%X8hrw zRymN${tOpXB^77=q994GMQjRXUe}45X5FV7I+lUkjc_5OHue^tD(-|fr-Nt+$yz;@VE{oXj;3 z@oQOKkD^%7{lVFo#^iX46k>~yvHa*03(c_NtDcz^t2h#XT{8RqhpnY)Hs{m#&cbp)`J$jz*VmKi8pQ1r;u(11=!?=&SWhKFvO)b223QFF9Jz;|<0o(5UiU?^d_F{4=_7k%R{i zG}~?x^&EqpaBeR4m`DuVzI*NIE{#Fn-w8-8h{BM5cvW@JH+UrN?9T>bxIIe!F+$f9 z`giqyU8#NMBK6kI=96q%kWm;_lM)Ncs7qMh2tgbLj3`g;SX%PvyD9h>vud0fOuL=; z>A3z|^)^G$9x>yTNg<1Z)0xtJ1a-8AvK}v9$XUmH8` zs%G;~^8`PC^rGgbFZ4>_4tqtR0M=l2*0U`&eQvx!ijs5*3xasO*tCkRE=A0u!iI< zyUfcssI^we2AqR_!!rbNRZZK$yT8ym^O25_{^!;FO?>=CLd3bc8cf;)V%^{skrE=z zNtXOh+>B1p@v;OQ!$r(4_ZifwXEJsCC`CLHJhY~155@?BL?o(nawK0V%>$K$c0RFT zYieqio`vhQbRjd4r<$tG!%Jy55*BU^vmrQp7lC8ugDIIO7D48Z@AJP~cT7!RfMtM`dGs12- zcpIHq-RHTYw$q&7xGlguMMSs@O)lfYySuw18>}gTUd`hLisL5i$OgxqUQvHJUnbV$ zT2D0LIwF{*vI;H$|FSB~C|DDR8m8w?R{@8Hrp&waJ?%gXgS=~lf-x_j!BN_s=AYf% z&f19hE%DME=fH5YZo?X1H1l-t7vTOX-B%=rch`M%$%YS;ADZh?m4kxC-TRrDc9KQt zvT7yWMB(Dn#X#N4b+xVt!T4hEKFvy02YG2p)356-Umf;_({2ufGQ$D}cJpyC zLy);@EMS`GO2yz}2VQG5Tk`(HtAEI4ErM^+b{x#nVEP`#6khPNb}EUn(?sN-r~@7Z@q)4g@S2C*lHCBdP% zly{LWM(m;C@F&9eViQhC~(toL+ zjy+_T=X@|6G*eoVIM^T;kUP0oBoyAXB(kOBl>d(qUfD~yO-g`mQdG|{fhLmswXTG! 
z_ z%-r%;mTI+BaMj-qA3{fNiVK?H)?~+s=h9-}bXs^wB&V`#sOAw~TY$ z91K)fK+h+(h+qqV=ryWPd!K)FKj|i^HG&#c| z?Q49;G=Fb*BnhkI9mnaS=aH2J=?*2X;5z=$5&Y9+dhCrcymIb=x%vZUvcK+B12c!6 zRdrN(Qp4*fo{PL&5&!J-!=#V>XjhBFrhii=vHp&D>$tM=IU|n{p1u>cM+jO_xQ9^+ zlR1m_#Dd)}dU2*$I^gFI21LLX^uu#yeAY4Ul*U)oHEE$KblyLQGln2+7Iu6{g;9KyK|r^OdlPHAXNddY*PkR) zVVEP=_sT{r!q128>@Ckil>Q}sS8fsX0SDhoY8xliG^KK_A^(W@Y=P(C@X&B~FdwVkglq=%|CaT3MZ0GRTl6CaD zh0D-ZYY1}g_adwJg{N`$s_oFM1Wtt9Qx0q*np%lJJ9gRS`M-9N`PgN0_SS?o=Zp0D zbIk^D1d$c@V1rQ`72XZ4QCe-TJ+h;x!Dn3LHD^pj*WF^T+f zbNj0A!p7bG8>Q>KFd46P-eM~zm&v>z_iyiegY z5W~j}Nx#mNU*Er9`~zjOfk7st(D10sPU-*rLDk}2pSz#m5-mK%Zy7q#p+y{^G zy1n+LL5DyfdL3+?W~h2091bW8JJw(R{U%GQA3>unrP~$h>575VP^}Jj%`K&)MsbQx zUiT>b;-U6*aFLe!Wcv~^rLXutfrmTz|d%+njzP7%+_3-A5KpXULBRFBE@OBxUV_t^JwHGFD zPcJ3$dr0>y%S%hgK~xguw>^J9=JCg4Qyp%Pk0bR}YW zU%DMUKN9G78HF_9P8`_}&6ULCBV9j&mt<7lu#UkqA%M@Z_cxMiWrrrro zYv6gP*xmQL`y*xRj~=UCtrzSQWZVTd1XI&8chS+h>zSU1iMsaJ;X1nCh^ zD?T9CVsji77ZXKUqS{`FL95PT)1Fe}8hEbu^+Ky>s~0@C(7;Oq$X<7}v^Qmkqot^SOqZZ4T~)$;AY7 zU5P8LEZra0f0?tZ-wEwk*U;!^OxDW@zJ=!$*6ydjnAGnW6>6y2vhMe{Ccx{RwO2M3 zC@$#LuRHd@`n?^e54T693$Wy0e;Zq5^um578+a7M8;ZC-Q8arWH6MvKUT>z=l7)@K zyJUVuY*gROU!o!K!2;aZdZWL_;ot~?=5TFaVnBQFAmzWfY;BjrY*N&|kj8%sxNi;< zzk)2v_>ZvtJ8K|{d?sD~+8+lXaKfTd6=kDW&@%LFhgJcxe%>p<4zBKSEvz`ky7?ewfaGee#@t-mb=q;9+)8X-x&bq$OX7c z2r*z1nyN4?b*&NtiKK<0*l0DKOZ|6mvauRN{>_Hb#^uhyf#w; zuO?vG?G*Ln#Fs-_+~#nfIo(WiXtJp&E{-eLt#7J3z#Q{@V+hqVaNl^b5T!rLZ9Z5T z+Tg;_(1b(q!t;IA(ImQSm77U$EH6K(;l0W8La_FN+8*R&+dbc1O$A$;RWx7jvju@^CXA~6( z)U{q!vUld?=WG8C#^K~fsT(34HwSHb0PaBK|W=p(ph73!^Lt zg+G7Te~{eW*m%JrB9YIBASSC9S&eZ*jAyFbcq-0@f4M#SZe#Eu*9nQi zitx)3JwJpnRE&l_NZ%7eod>0z{3(!_WOTaDy!(bt#KSq+m_>A2_V)IOV-#iy@7@wR zs;sixG(|C*fvbV8Lz082rt3R0_G`bsGVKVr_i+oGbfNQ$a?`+Q=Xmi9n4E^1T=rh6 z7Aj57JJPHedn`1&@|$}D=S>-a;0brXs!sB2Pl5{l_|;hE4G$it zIBv3s9LTq-eD(~r+sE4!cIxJ5aC)V`OwT3#&&|6-UuL+X@jCV=uB$v7Cpe3hdO=_4 z>uT+j)R{}S2+?oW&?wVd0STGpoUD;((8ZeIp0;GVZ+^4xwT_IamJGorE0b<`e*Kd~ zlq2hIUE2eD3}d-N_jQUXiSY3OyzR1R{>Pd+4RzTo7PMm{#+=QhABbxN@6?Jbj@Sz` zKqZu4`rg}7@ByPNN2H=Zhh4uGFjrY`qNm@NOc41q@3bC72!`Da47|&CSn}DWv*i8K zfkINGSI@M=D`7+h@42f}e?F}q;R2`fnwJ@Gwa5A~uEfJao_WI=MO-BNY~HMj4tgDm z6%*vW-|UaaNNuUll%P||4-fE2zICY7!@CV{CRy?m>J$m)dBK$FmNU#8<%C`=H{fp) zWUw~Sh~|=fi(=Ox7MGd}udjKa)sR_$&7pu|Kg>hAyuIlL$FEK1TqK4w9O~O8{j1k6 z@DU3^fA^c}StEmf6qyeFi;aiJSQ@VhmbChwM<)BDtCCV_b7ic@%Ii5NK>q0MC!lBO zDC5DOV&z}^_x&5LzDsnwgzRrT@%^C-@A9CtkG}2$mo;Vc?G(m%O*r()v&{w4uduOw zmAy^7akc0AZW*4FQ^*_Pz2bxvdKBUNm~zXgFD*^HNdq|_9+51tszLjesB(EW+5u!%Vg za!%WPid2cz)Lq6+=z@M@5WWi9T$3bg7ljaz?I#dxGUJXc06eLqptV9KWbaBeggIB>p$wv$oPu~71V0aQV&zU$X- zkVhVQMCG*IqQ%;}y6!S(&e2k%L|iTUW`68Aw$oY!U$khEPECN_x#Pw)s~xlZ$oTQ& zR1A1I1#_1#Um=e_{x>apLYVB|e}K%Ib+}$%aE;s51K}1gUaah|R0dk5yAr5j-`w*{3A}n3HR6pPi=MZJFEBAl{4$%E*Gxr9L5Mc4ZHi-C$17a^k>FVlg6RtjD9>yZ4 zF!Cs4E@Hqng!qhpy9CeChqfh4mdI6CU8Q#in|l>~u!j;gPj|H83v9s$)`xr66vv0B z9pT)53t`6b*fj`H@Xhl&jT%xSw|}!%{-6Z7>wmeu(}{4mTwWs$gS@;qSl+F_-!9kO z;r%DhbIAn{zxd--a{h@*^$nEcFWoG!z1tFK?U>U~FQ}AH9a|}1y}41Ie$C5`)G^+Z zy+G*w%={`j^XMw6sz}M`!Q15OU*?wsXvBaLx#KI{_1~_w$IQ=}CtlepSKJ<*Y(6=% zu0P`X^XlZ_iD9wq_y25?|NUL(;qt}fYUHd#yZL~x+|VKie08m_tuvmKsozTKx}}t0 zT_WAo{rl$f&i8+;Gjhu{rth6tFLU?y-y4{F`TZN@7YjSjJ)G-$IF1BJpnVD4K4*Z8 z?BV}B=1niJmv1jz*S?P1XuY9(xmWv7_dyRuY{8dxL8 z4yu(|{cEH~y-8TKsYz!3bum6^UX=2wh8{AjU$uPc<&R|Tj{JVNI8+JHhxVVN~CigAhDCfQWVWdgy`T6O-gQcOS zLSEXqO|E)tl{~q2&wgJ3u?i0_d+f2tBAK;kqmyIT;J|@$%PqHP;SwTZMn}{_BE&z4!`EH+ef?mvZr!To)KgE@ zg0C@%IYuaih%`%FH-K#;%6|RpUspR&cWO~~%9Q`| z&doY-bn50tGRF=HgHDP}fP!xxK!+&-?XIh8rGM`dx#*NCnZ8e%eBruAS+>gCA<;0n zM9w^>G7qb{58b%6Rc^dH`(MKYpM229D|X=)eC#+rM~SP!^3PXy%4_eaH)Olu`^V`= 
zSIE~t<@{Cd&MRu=)(4v82fyAXDlfh+{d=Y4dl#x7R3>P*Vj7C|_EhAS2e!+?H$5?| zQoYCg(rMjXq1Sjk|F~}Q;@i#g=Vy1Cro>|pwF_`~f4}&TNhA8__eQuveV6D{pZVC3 z%OZw>+n*R-Xn<=m!bWpTT2^gzM}+3)(=bT=|Lxx+%%y)%|3{O)}YE6!`t|}$HYD)w;n99`B%haD~AuH*AxemxTXdbHRvSTI|KRDQc(heFqGeC)aF| z%idTiZ*T29yOj}NdlXNo@~lHdGeVZv>eOs;FWA@CbNDiL?L50q3`(ffTVG+_yXqJZ z(G)vRQ6Tzl-@Z-fy~Vk+F6Va*HB`22*`mgNpha2qV;3$;#J74aa4uW6RHd|dUys29 z$m|D;IWeLOzZ<31$NM+v-Li%oL|s$Ny39GmQ`pAtR}}0L#V%JAm2JW0fOGg+wdy1J z=%bHts>M<43`TL*=e+@iu-)9eL*?~#V#b&O0|t2Z)KL3-3It;8jyADYR#vzVT64I= zf>XeMP=GWp{T zb@H{F8s*`qoi}HDk1UN8IL(|wj@z2j$%3!<{mFIJ9Hkn#y_{%1z#nieeCc`y{k9q z|C$SG^Y3-OJo&FCgqoKhD*pq+b;+qX(?{A<_PVRHUq zwe3tJH}l{7(OME^iOhuFZ=NtCRVAd*7qq zU9(d@bFcSH?HT*m$T^yN46BcL+nD(sZQ-&e`AA)lq|&dZR1O*v{zGuoJp4KR&+jyfx+K1G z*9rY(aGgJv_1jzJq~AIM{Lkj~m60lqfn6G#TV=t$D@6TmWRELRKTi~noC^LZSf^4e z;@b5j|WR zcHbAq^lp>jJ67$;{p9elh-}+Vs{iSu&9YtnC;HmhzUq(jng3LoNmq{VuXgZmmuJ>{ zVstN+6ZK1@`-aRO)~8lRsU5$2g5c|kRelVPfaPy%ud|>pb8ccY=eB={eE9f3ju0dv zLgE2nM%*(;GY3K;#vnH_>e#7j>>59qgO}OGYUW~|u?Jsfp6SPU_=c#7I$m)F@glW> z5qy#Az!Nde4_oKng-`f4CE2#KE{Mt~mu!XqII&icf$_LwKtUcb;!P*qWi*v)l z*hIhaVcRy>3H=yrkB2Qg)dO}L+rErF%tbLioipdF_Xh#HwPWju%e*?)GrhG3fsTC# z(H<-4Ae&bzk#I=FZb=BFH(gdEKmWsa`QB~bFNhr!R>)1%n=183ubj$7r&P*+pC100 za4wUP=zGc;8)dWizC->?**jqG=v%B*{qlU!KJh<5jL*lOM`C@EPaN%iV>W%c9Jecx z+z7rs>r(Rl&sO`sF&i6G#+J#$*VV~?-`FT?Hs<(SkeOV>BSiCW{Jc?a_`D}*81voL zj@=(zR4b={cav%AkRx`3?&{+FK;6|*bgchkQh)xvUAwE}xG7yn@a@>EoS(OOdsoT+ z!}I^+SAJgNM^|OpJsdwP_a2pUP%fBJCq2|oaZ}be`hS33G_y_`dV4=t7OgTPz2j{k z@1u9DdoB6b23_UsvdMqu?mOe=1C#(>U(vBXoAJiGNaY?Sy8h4JeInPemr9>7SEYG)c)7F7FJG6~ zMSI(UgF|8{cEbK>%0R(x-^*V4NXFEv2VkkOy687o;q^7Erl=orv-{P`8N=#T&f&~` z7xU(nzka>bUj_`vAl6~q^pVE12u`cAdg+Hf24E3f!{5<&JgY^zz?8rqbg-Mep zc>}B(#9E|GIOm*mw5W~q@#Dw4qOK7m5d#WD)&&a|=m)Gp?8Srake4^AAdWF(#^|`w z4>5Pfj2Sv@MAUd(i{pKi!S%g6kAju%@X~r^;Dpouyf0?vyE0d_xgC;>Hp8 zDz0~DD-fM=uiE0XiMzAK%6O6qc)v9zkKLn3eE-gEGC}R$9n{zRMbrjz?t&`WXLOnT z`p?-@F4%M0qn`Q0m(R3GRp4ukI4M^j*KTXUM=$VFf2-0a<{VTmm!9eU6|b#!i#c!o z=CpkGj&1TkXLrlQ>bG96KJxyvuhqzJAK$g#c>nP94moOSlN>iaEOO7-yIhW+Q6Uff zbJxb`C&#mnB9P<3eZN^YZ);U~b)&z3g(eiaVXwjEGO}N3mr3hT?&X|J0l}jnt@9lCK z86Jb{kogpPxHHgl-An6aq{`iU(kLSq_y_c_E|Z@q(f4D2S>EpVnBOzcKmfUIzce9Q zCv00rM1A-=t|RZhNav)ra0*fxGd_^+oh)2!Y-n;#W9nk{%(54CI6E=bD| zEY<3Fqu@AuOw*3EY%AjL%VJB15r24fCK#P8mQyGCe?g53Z=g!CFmqUyJoQ2Qe%fi~(6HdUY*VxR^*!g8 z+=C+o;q_`)=>3cKobNw919{YumLXWG)gSqTXodrp)3D0yS9s<1n;?G^l-`s|Y* z2?XZ(4dL$vuzc9yYB_s+ce(99d#-rjcEW|t99>r-3x-CE@r78%&Rci)ZJvg{s&?;w z{GZF@8YLQ^J2E(x zUyw{H=8bv$SUjW+Vi`)%C(hv)yK8YY{igotqmRm>MT@jh2q6?Zez61A^us3VD2o>_ zmOuXSk2=>cgjDQ)J>i5CWZ}Yv`k`SEHSvP@c<7;rTDxxbbo~Xs9(?dYy*n3n_u6YO znKNgO&XsGYFu-6FyYX+@kZbtYzy7sO>3}}04ZCvRefM1*$EHor_iF6y#S0Hhd*+#E zv`7k(7Hx?A;fEiVS6+EV3#r&OeBy~G>K(cmgO^=gZdN12W$<|Cop&^&{^~w}a1BA# z6can+Ke-OxSkM?gXOvBo|7k@ctWooyl|~>%^=i6__d3%ZSD>9L6kn@ zj5FlpAOE;}UF|u$vJM7VLy*y~jETP6PJn`M9_&6+xl>R3+Ge@+@|sXOg4f<@mJ6;{ zALUU$1J3+L^v7n@_g%T;Tj6gQ?|s-JSKN}EmuyQQf9ce{O6A`FO@8x*-N$&vmH#Yh zk+~N;J7eO;o;^p`&sQ(B3|EdSP9e;@b8#{#D*%xn=b(=CVn)e8{GgaTNHm@Po#OX)*=VKUl zB=@u~@BL5hkX0L6<@6(**naugc0DFwlBb=Fz=O|km-hn?EW2<{IJY$(iuUaP_IC3( z`kGN{BYz%zAc;UU`3BLF!@Vg(i$X1myWt+;+*r@(U z(x~drxvJae>i+`z{<9(?A+0a+&VBNKe@_yN{NEQMR};RZa_-C;wF54q!!q!Vx!vXH z3E}Ta81mKMtA}f>wb*7imApyC@*Au+w&O32lhrQp$%FmrDsV5qsPe+1460Wj;~(p1 zfH&>7ARLUB=fxoaonpZ_|1Vr|mQR^b)9!-r!AevPmOEbE>^|ghMugyd_v>4( zJDwjfph_M*!P}J+x5m#*>LGZ=l{eNk$>G0U7T4V_?GUy;^Y}`YGkc)S85VudUA}*B zd2q>A?<4l@5=nNWKYo3^{AF46&xEU{_KlqU!26r!g*6d>3<-SC{`Y$R5i1g4Oe5Z9 ze_8GHMXunh_URiEPX6}MR;ep5k!ig;R``8({Wcl(mql*WPtO`DH%+fU9IvTEsJ_E zfIWCOsF$fcdX^|9hLhyxXIeGGA9X7^7G{h0_eO`U_Rr%Jp 
zzNH`Dh5^SOd#uczJ6E23@=5u@4}PHM;0t#C&YU?@i@!!FMLX<39KGqLn`Ft7C1}!O zFVYYA9*(BkF&3|2fxZ9F-gSUiQFQGiB|QX40t5(z-h1y=MXFMzh=5{2uq$>!MZvE8 z*gwUJil`_kO+ZkP-a&d#Adub@%75OuncUgk*?Vt70s(WL=ic3&J#*&FY=8Icd(NCW zL(`{E=Y(mIVl}7~h71`Zz}p!!W{AXWSdo;JM2#9X5&-kenKOkh0L7_Or-}&LZ@&3P zfWBCPs8aZ0N67#kv@OR1QzWtb@WT)3@4x@XYEc)_)DrNOeV2Wa%R)K(&#sTEj@mB3 z_19i|O^g9_&~MOR0PvLnubg7NY9Dsc|0-3gU{#?O-B=ZBhl`xEj6f0KYZ(&P1>mT? zeM$472hPn)QZsHQvl^+?z%J?9uOQ=DG#3{MXJ* zTKBIr+PG_DFA6PPjOMP^JkYpsU_>_`3M*TjR_(~74g1)hW~?%~o5^V&0BSDUxl*e> zSd>aTjv2R8|JFWKCB(R&;e}9zGR88U&!tT-DReqWAZG@A7jAZJ7bM=o3kfl0w{qrD z&dcih&h@-$#t7@DmS>YLP>*Rbj;613vt2aXQ|EJGE<2NSo~HidCZf#UIG}@?vAD4T z?+azE=QueYquTh|6X{*ckDgc-%iGOmH!f*LvHh{>Bbyo0SuB|3Bj&&#~)Gj zd;f51#Js*{3GyyZQ(6bp#MK%CZ?m$SWkgseWJh5lve-`-3e7g84)03LM|^>{#AMLY zli8n5E9X>yesMdsMTp_SUb^t<-UM0`?fleQmg}wyIqO`^eE4sibFJcov(U4-yX)v9 zV?C=~c(mR-z=?gC!|ttS6>=zDcEqRC*!3sr+jf<&xT9v)yex#YdmtTy{pr@q_Ah3J zRcXv00l^h2vjpbs4gRV7ZIWWIx#Qn zsS%`RDh6}MVNZ=j#j*-en)P+dsG~*$%l1(Y{{Zwtpd!r1su6~CBL${U1Y%5v1Z4?^K?gl$BnxkCB}#wy$tP0n{}!>F9NbG zKc=AR82Dr&efm@Y?dB-w7iYVVvJMHljyg4Q_PusEb1Z29IM7Di(pkT~;d>#d7zE_I8h&Jj581_OC&%^6Bj zj+KEm0|e!Q(?Z_o_`?yp_s>(bj6q)?4G255W-0n=Un2c;TDdvRLv!nk3O`sHhsQI8}MLNaNti#qEr(|AUdOFz|5iZ8!BKh-vg@kN&vO6)I4klZl&-zt# zyaZnug|7;nV~N8hCi~^VgEfPxbBOVpRms=+?0`1W{*P^|N+3t$xy!4iPPJnG`7f}nD+q%EvNJE#wP z;NDe%SE=u63LB_bx^!s)a3Z1@_R%)z03@F#HKZ|_R zk3bRNs~;EJ(uOsP)1JfU2{*zof0izOQ0DUt-8+cMx^*dDv#nj<#(1%pjSO%B3`!k! z3Xa+L=j9*h#OGg^GH-ciPV_PqD~4X@67a2Dz8Fm!X8h!GKo@)PQ#S63c0%gp*Yrl- z7ArPqF`w^6>hqZAbQWEjm8942Gv37Me7b+I>#*tFN6t~7$!;PBb%qOJS?5M2>4OLO z!kQhvFz!ZQs>D6@k`61WXz@cjKrfjRYXF&TG*FZF2ZMUVY2iPr7)91!o?h%;^1 zckE-gv~%oxu^fd8;P?5jlPM|=x8j~l_437OAScc1!$5EK3eMByxE{V&1>o!MLEZC_ zDAK%=BXDnLe=1Sj@T7hv-6^oPhc@#kB&9Qp1bq9{_oWby>@>?7h2Ph_PPdFG<<$}- zwl=4$@}DmY=0uO(s_Cz1i5VBDl!+@A12(vxze~UuI@!1gX0?KIm`+Cr_< zE6_4JIXK1u?>Rc1;dsD#XJ4#S zP560{K5JFUS!SBs8lOf3{yrwaV!0}UxrX0t>ZDviGPKD%PRrGXX&a`G=o0GwS)oga zJ>U$3)%@-ECqsip3zswk5|s1A2snDpZ&)r~R3D@f0qTh*qYi*pByDThu%Q4(0it4F zCwN{0@9ll}-6uGF0X!nX7C_cEZQ9U74?UCo@2(05s}6KbVEWy1bAD&etjB0e!PhAMZ53|KyJ^TJq0+8F$eJ5s#Pli z7!MpcQ1k&HE%e}9ZUi9r@ZrO$b?epwd__ONNvsOmG>?J0-qpOh9+r{#?an*zq_bzw zit)zwQn`LHKKRKP{c)|MyG&0lxeN)PG7I9pOXw z4f3V!%)5Ngs|*G*=zF!wz0A$rr)_prfazQvQp!}2-N{Z2&@Px}9~C(5>l9k+Ayq{C z`X%Y32brAS1lHWx!kfN*vKZa*W+KHf7qMB8TyWNXrKr4y!k}uo;`Ht0AZj`(mJ(8| zbM|9N~O)|F&{ID+p;lkYY5dH$B zMp;fu#2mzxgGx}PQYAQIm5+zGbA*WF!#q70>-ozqwXfjTtzXWInud8%y>cZfiMg^j zp3D%xv#E~bEr2OMN;7C{mK1(*9EYe_H!^_1+mf1>4l!(h*}*g#*2oH203a^I=LjT9 z%Wvsm;^*BIl|kE1JEQPAni9GlOUR<7hYKz`J-_=`nCzYN5U49ipFiH6K>Z>DsBH!7 zIk$0$5B2ADzhp<~7Nj4l?XnDLj<4tXGkR5D&X29_y3WqH^g<2z?oZ03hc}&;-LX#j zT3;ieBz@PZa?ZMPJ|$?(nv;|&?@7cxhxMMppU*M#I#15hDdiOv!y%m&;vF3Mzqf=N#8o0E++-0kTP*+&aotS-=&W#8I{B z!VZ$XA*wSVATS$9H13^fA4$tFMY{lG0g$GoFbyRCPzEp%{vjf@PoF*_=@-fY4#IX; zR+g}Vs9%&LA{M#;eDPBS>$`UCDt>A?+CXwO*n$oOe$=d4lO|1?q^jo3n*m9HxfoaY zjdl28OabsBNuF7dzzjcC&^D5^0sMu)1^`|}^Y-oAS8xcU9DN6fof`q*J9Ow!0hXg3 z*jDK(R9je&$Y_iS+JY{|8rwmF<5C}EhrVMwVv1!M8Qr{jbHYy**apo8_CIMGe!wcLm^8}`u`>}M#4f7rgT=c!=d zfY0z9g7)yz0sCx`T>T>;pV+Vdo)mBoTGcO3Gwyc)K&=~=pkLk$qx2bk|L_ce7}nJ% zC&`$(4^3;7q@Snoo8Q+H=^#@zsPfEke`;OF{vz_$}Ry~uZk2w$D-JbI4KctE#e zuDft!h8FOJ-so<=^w{T~M};qZrmRR>rncedE$$rEBIeR1dVg^W`DBCVBRl)%L_bT# zI8Idf^9JW9tlb)VQADV9Tk2loEO|PZbnQypxtYSw(x+TUE1B)pc=!~7i}`Ao0PZ`E zp-U0i@I?MPkUIU=Q8>3_63nYNt(pEjRMT$pxurl}rMf*$Rue+YKf{-vzL@w?$z zChdxX|JlN2C*sdjTK4DSJ9*tH=22E9is?oGa27_XTBZbzZXQ5AYWh;`Q2X-;za^XKR-Fha29$axIr?-I&`oGvABxhO9eDiN2tv}_so=j}Y5C5f&*U`fArO4j+ zOfBGxemv2xY0AG?Ip zt$&@Mc<^*+#G5YyUv{WWExEpwF6I5{&z{w2(#9xSddfsaC6%(Lf$aZW274Qo_7T4( 
zJRQfpz0ujxy(V?KkHOnV>sXHm=tXh@v_Wf*_s``IR=gq={xs? z^D6_#n*9C0e|x0CCs==oX(L*eHlEuPn9pm%w(Rq|>0V$1M=f9LBJa(9|NVE7!0Xhh zQvxspkox@d&kJx&>ZoJkC^HS1;mRR_;$rmq=U>ndKl~toF#v93#@tSK+;Im3V+g*% z)y8QjDk_Q|dE^l~eE6^c&LScr2noxUFJDgYzWc5K^x((f!GmeSgbDP@E3eS9Wy=Jm z0}_rs{q)l!q1jt+y(N;d0enN^Geq_xIT%EOn_@w;jPkg+IC}QkXKCx!tpcj8K^_uO-|cI{emkB;PV7*p7W zppSB_0F16#vqt!jZ3u9)M~@z2TLDy61>3xBefazDzo*#PSON4R z5*vM)J9jS4nKMU>6Z$x6)F=^YjK1&Lvqu2NNOm`E+O(V~X?6R;N3)=h*hk-c?>$?G|dcb$(rgOAtt>Xcxb%PQXz*h$jT}nUB#$TjAnJ&S3$#Yoa z7f#f^vHOSL)m!ieGTQ?-63n0eH>hshRRPw%^RO{0ES#w%TuFzW z!{#5`oX3aShP(NV{4(Xp>GKr9)DC9p--dZr=O$Z{qM7y7Wlw-sL@d7Yqw~S0Zlw~` zu8#2m{8-F+dj7j?fP;^XQ}KTEuYKI zDn9hn$O?jfO6P+o7*8#3t7({(R}%IX>`N7(aw>mP?WalZHK=|-fdIZwF1KDQ)&5Og zZQ7R%5ACYvwL728FG}X@b%N~`{EKS=-(|;AY2jhVHK$#ej|K2`N9k}mMi5ER23EI! z=~Xkxi-y$bv3?#c3!Q zK5bixI+ruv&!cXLcL|!qK<>w!jP3ayXSCE1J`Bcw)4nn_W{_7(yAVHG-YtTYwMAMY zeIKhEOiwUKtCKk9kQvNi?+K3DRV9NjQn#%;K|gho(%0#MwvX_Ej75#VQs+hxp$b4-_7P)mX9H2X*d|gJfG_qD9d9tA zWD%CQytn`WKmbWZK~zzT{Ye78*j9+vm4b1X`R5<8x@_690$9iR?cBLj zY+tMZppSmT_J$1`1mKHu0O=?@cI=qgW-{p=BBK%At8N4AAE3y9zB+yYiDf<5bR}}1 zk=tPV_U%F)pfv1Dk$Q5uXnJ*xfDuKzx_rNE-ab9fmrC>J;by^S?5}dvZ=;&U>FsZv zQFms&%e5Tfr$5mrr8+NSy6Aq>J;Wuu3&3~mR3~r+gJ}1qdwd!2b#ETJlm9Q3BG2H8 zNi?pXw*~O^;fLyeos3|ZJIfE>>PscFffH319GTCJ^CR#xKlP#WhE3IRHNSpO7LAx{ z|FAKvyEjd{%X%}b+SF}>$A{b=f8N@3dgNy7c0&a2 z&<;NI@zR3b1{V4S_6hvms@UAyQGUO~QQItqt8P^e-QqRrI3oZ!Jdw8s1!88=!l* z6dBx5f3601?<$V81$MKXqkQ|;v7RSeR`jN-Okp9CrCo{>MRhNE1OhdHJd7qO5tq7s z)?Gs1OLoiy149|i4P8XYxF28(16mLoX z>7474Do}d{c+C>V3DSP;RE3_}ewsdIKH#cM5%E%^ za@4z`{mX;HYqdG&=(R>pj$r8Ay5Sfti8g-4fbt@7Jpury)X&-R z$-67S$`NFw-e0TpXv?vTd+w6VNmr61m?Yo}pid_6!@NTr`wBoaNlgHM*Dg4AbfP=CO_Mj^Tdan8+^~_V)mTh7lc5)-4pA3{rfh`?CD%b0#vafO-Y@jdK zu`eoh0Lt;ptzgWgz@BPDw1W-E;@nOSZS=!#AAv6F%58@l>b7xMZ~**rUTqhCz&1pd z!yd|E8@A+njE@*oUXQ69U#v&_=4}rf&{4+~Z9rGH1#lbfxfAF~fnE3|+xMittXniK zoFh;K_HHaMhVh;U>J(@L_$DemsG67W1r82{l)jE$ zf54AAH+LP8Bs#%=9h#J+#qXA)S7xWtH%l}fm5RBXJ~A_fzI-l#DlJa zNUx0#pj#LS)d>>1y>qYqjW+6>Nw`4cITAQMmz&%U@I9M&fwmnsMr!DMvn%_N0pEuQ z_-QYcw{`U`c)(Zl*}s|o!|oEUdx2or&!Y0eJpzC3<7g0$-hKFocKa2IJXR{Zv}FpyW~Ks0DCcyNM9Y`i*=~~=33Vg z%g`&5f*pZUHh@}!f{^O&4bSI*7X|B|E9-aEcg=f?x|NwX_73LMeSeqcO8{5K^LgEt zv+4BfF?Zmc0}y(ou>)AlXBRH3x$-{6={=6>1^t36`SWy&0BZ%MgXX5?=2ge(trit% zSXKMn5)FcUXjzX4dU$gb%{$=?3RufgwS&V;aRNQ7VF0nOHLXA`IDuO4umG})pq0Ki zFXK!2Uy8iQi|g(0|sm55+qYx2|ypssxR@*=7iAR3}NA&B0E zfA9@p8b14nf8geYKB6t5iwIcMLo_Y=1P~5wOWFbS1wV)xlvB&$2kglLb}FP#y=evFYlG{Sn5(Vzw=JLt#jm}55btow_RZG^8Iz_*|{d;xlo z?&D4Ob5c4x-wqr=WNj&A$jcAHK%DdbhvfL@a$e!HKVCFLLfXE4F4bFKYI_VY=T1okIHt z)|<@4)C=_cdixvA-OPKNTR}wWLmwnlbo>RnyHD;f5Ey&VK19Vb;%@4PD_<(}OPJbB zeV~(-JM8Ir$V9Jbt_GVGLy3RMoQU{6Uy71WkxyyYUe{9d76HLXR%57bI`6BGP zqq+#+R1;?@-*;F9$*zjz@{d5fmroNY7gPcL!rG@CH*PP{8IT`hsfIpI+hX%+{{&LF;&A2 zwS4Kfy~ZyHdT|-3AM6AXMtjdN*KzWBs>{5=VS)PdqFsG+dAlm!TK}L2kXH&4w;?H` zkLKm}zx0X0elguEv2~TVxoKLAoVZDv{M*!V4HY_?b>~~Ng1YfNVMgN zwX2t(S`Q`-J`&QHGz!d6e_=Ka6c6)1$0vbmCv_Q%mP z+oD}^ENAjb_r48Lv^Oz>o^2Saz4t{MphftobtR7IjpNAOXad#3S4R_=f+34OYg5S< z(Ct+rfHuaaQ+GGOw5xu2vM$2ki@s`InFcT|#CeVgo!zoBfpP*8rWvOPYlcvVU?*5! 
zJ3zlbG-a@Re%q=R7Xmrb)GV-Z^oBa-Vv1!Dy4jxxEalf)fdsI?ncBH?X92V#auI+V zfJBJYQTwhdW2j73$(g_V?z=cS*bo7v0aObP4q@AlV3+Hzb8HiNIt8%$%{Si&iUt6F z&<9A@uU|jv(4j*P%>;lpfRiu0@PgoTMdYsp;Lz|JZ@eKmg8|+_PXd+jTkpHEp8)Y@ z&6*{EWaz`Tv;q5eHn2_#06-*o1D7oN0^ksUq!dqV0E~R}(MJV0Ec%G?fL&~#S6_XV zrc9Y4e1UzGL(F!uJuwban;D(D#ZJE9P_sZAkxAz&A&MXOO@T+aMW9 z`FI&%uk0iA?CN1%kre(B(C^uW-|x%r;Ym)AHFR<^eLgjS>QysdWIvdfPCu`7aO74l zSByS+)}LzIM0Fx+_o-RVhb44m=64ywX0LWuV5t>Hdj9rKAic@}@x0~C=jZ}ovWxI_ zN9mB?pMQgM7g5h6Z}MXPUFS*JywUa_J5PfqCkeoJHuLD#scIZo=p%Ca6a#M#*;f_V) zuYZ>&lB7wH_m?%9gr5+@Lj`j}@e6~WU$jO{Y(2oq{ysKRZs_VejOOnDmG4sNG=sj+ zj?e?vcGjMrolKkg*vPH)XlgxXq>bfy|saqq`dAiqe-jkK%nMPs{K0eo--Pxv%Da_>6hF{ZPh z&L9_4%U#ogF);%qIxgjc7=+=U#Lz#7@v@4yg^Nurwv>zYb;|MA6_cDd@CD)PmA2^# z9fAWOyEMQT;O^UhCD6~iQY~P|=98JUbblJHx+jbRHS`QMgYlxy3HSmtc%rL)^hspm zIT|(h4DDgS7XtmqI|b4G?Mmg`Z(jELGDyY3_%PT?17(W%sH z;g@~zJ2=yhb)!>k~M7Dkg8OvBGr^>F;13s zmNstOC_rP>tx=-})va5P!on(X^r#U`OG!zlfB)ShIBkm+!zcB=!Zsp8QO_&@Cjrz+ zfjU?YHSfTPkj^Z=Y9ksJI&Ke1h)U$<^u(Ldd`1W*Z4`-^!*#@4Chd^v(Lvo4nL z13-)fX;P#vcy=*P$;ruLe*%CDUjXg`Kvo5I%>GHA0Jcg&8MXoVhhZO_&*)_9|kB;)8@0VuMm%nE!0BDXG zk=)F=5$)_fA{a~MfcyT|FmCvwV4xm-8UnJka#XRjX~I_lIdfPibhg7DJRcd zpm8sy(3!Z4^iwvVE&vUwfT@dnn4DW$lTLi{MH)FJi9VVfKpnD^$i4qlI{mcD@lfik z`IH__<@Y$jrdg)l=|{J8a=e8$uiJP09Mxk`{EZ1^=;2w(G+zR}X2mPK{3$1tL!|GU zck|~*ny4?cdmk_2#D{<7KC&8iZfdC?D@3`j@*w)w?2CE%w@n%J;>e4nOS;yLIg88j z3yo-|qR4OITqRV7zZ5V_N_x&-?O0`1Av=)q@4Xt(MN?mG6A%14z&AGe0xjB>z0KSK zzHZu3+bf#S+X&p<*`KbjmwhZ;!g%Wv!sX4Q|8A}L^b6GI(^IrHGLtU4*cPWb5_kUY zR2tRH`oLd}lclNBm_bv!ByDL-q}%Vy(-fKH1gQD2rRl5UsgzGK8r$6Y(5u>XHSf#k z?LM`w050k*W6s_TXH&uqB7Sv%FIEoXDBrE8VufDO-<27G5MS$kB|J&ss6Qz>4qrDI zovd0Q(;k)m=*B9}=toZ)8Cx%i{Szb@+I})0_g@}V z=u%AQ8q=ME>0a5m_JwBU$d2b(^$$(_(zOb;3bB5XsOlks*)&eF22i+ukT*pz<;Jty z&ZsJu`5Dd1Q+JNUMU?N;+fLJ?bweneL0~&+#7WwgcaNaj3-%MgbE3UTnF92k*D;*i zbUc56lH|?}k+d!*O*|@`5v+G(H%RU=7nWF}*-7nEt_h+$?9l-7k;E z=eCI={Q?*P2~bG@3!vP0-+f0fz4Vd*CIMW&_~MHq+LU({gD}3u$+b4~x;sQNt@QK^ zj@lf>bzjZ~)&K+oL`22n3{;-s2+LVCd-mr7M5|S+7EPZ%odygTAnZzC)JDu@0O$|_ zI)DCr5v2-XbjFMsG<^7QQBC#(^&|+V+QU4Ocm4R|kM#WW&kFz&;Opa$KTeN4@`wQT z%=)@zfW`|JED+-qAJ2gz44jV|HHzMS_uU*YPykLrvA+5MKLAiqoH#KDKm@3{c=2KZ zAnNR+%-nZ?kLc?UKl~s7V~pPiAABG{akLHK7rlH?KL`Z;DGRZ*sx(V zZQ3*oxUSZdw$K)SQe-)n;R_9r%t7hKKh8}%$Xy`6~_UN3G8PW3+adSO)g76 zi>6oS2pFF#uFgJORD_=$htoI!)yS+@(4ETR1UMaGEzOKlVND+N0*x~jcsercpi$;)s& ztL+ud7tRsrTHA-Z)-IgB{TIF3!eFl|f&3B?R1H;2m!PU;_?JCx9AvguU#bcg1y%B-D2kS&E)H#m*#wQb6tO$ zvND!GaW0Z8H3GFu8}}#pk} zKHD;w#;rJIUUk`*EBaZF*;wYU8Gg0RrqrS)Eee^k(x4H0MYHHDJn%W-d5&KRgg8z+P!9n#Z^A>SOd> zyKrh1>`RYqJw?AqIVh2EH~8_vIKlUOiUDc-&hlqRas1LFo)3hCtn=arAU&A@-DEyK z@Jr!&KDCiApWBeUO-S|-jEg8klj}P6Q-HuzcSqC5haHq2KALl(^s{Jsc}GA3R+s`1 z1DJ*%z)^s}5bWFhkG~wr%*+r#8Ngl$Kr_^bK;L24QO5zWIrLx~9K4vi6V$^`f^>-H zgkJz%F@+7)f2l9aboQZxXiog#FP5cX-c?~0lNtX3? 
z?aFdY(I!NW0qgvj!iTRsHH4mVE_52vXEn)l6p&O$zzQ$`g z>f6aT+w3`>6FAy<|Jf{R#1X=|k--i0=k8PnL;m0hU$Z3fi{({E)2U~5Yvf;uKl7bO z_|kHweJGMEGXk~CTDO0^!ru$8v@Z|!w>$;jiOJ2&Qh1=|W|doBdn%0{Uvq|5+#Ie= z)YUg4fO>Ic@SHcYodmQ80^Z(Iv7(-Xm$10z^G&ISuJL({ha4+_UQ#63&`-+?X!`clx2Zac{Ja-BMbj{q_y@CpCqdRb5QLoS=A&{z8i zTM`I_u3BH(R?DP4*pT2V`~WxzK^^>12R~^;`V1X2=!r5ZvaVc~ZKGWETh0TpMEel* zLxSL#%5rlbp)38uycF3Mmd*Z4eRH|gl{U;%_$mFsI$i(Nc4d882C!Goqpn&XcICKY z9zU#?Ww3>Lxt&zN8hQ|QJ3<#WFpemfV+=4EHe?-9vFPub8v*MT`e9;waTOQ$<3zun_k*ReKJfr+yD5(ez1v)VqWACSBV7 z@JHuGNS5gmksUbUyZ}K@_ldo7jgqD=c?zKUZjM-O0I;l~qIF0XT57xGC==w0be~=H2?`wTf*0rUU;UY1e`F_3F{Fix1 zHmBOk&-o6V%w zgIxhC28Y9`%g%Jc&rR1rYWIR+;;iW8l+b73CZYmVg`IU zF|F2D6qv6`ZZ@Ja$agwTsJlXV5FZ*n?EsN=1cY z1hVhpibf-kBLKj1**;^mirEzQ=2JEivu0i0GOixa^e9btw+*y>?yJ-HqznatzRRuBtz0_;DEMhPx|iT?zKnoQ$$H2i5m40(`Zjc6kcxm?Fl}>b^K9 zIE7{KteORpw3k~N2YZt%?%vn8#M0jQO#0vM1iHJS6$pgCZ+9q1UCtb%(`m;0-^>omfmVn zUfWi!KrfowB$S@W=Jl0I&o&CR@b1cW>zRt-Sx(OOZ(=4r&w7$R0TINHY&j*>4AaR> z1<{)$hQHzn;q3|O9JOn__W~cTd0)pefM;ws^SYqyn=D;mGk9@tU3c8jzFa^q>i4#(i%_DIt?)0vGc(#UY*F|aQ=gW9e-=+H6Q z$f@`rnUUa`1k^xp0RGqh{rd}$4WGEu7hPzJ|IcnnKNfZL}^$U zb8z!`sA8_CuERVyjXQSiC_qTGhlFFWBk3e0s0qKY9h)|7N}x|bIc$Tc7=4xEu05$I zr>Z{2)}2TljP<~Q1L^eX(}FSq`vCZdA#!_3U7bzYu2~1$0Q)K^9Kdf3o6VXv6Z=Hl zwrve#4NhJu(jKN54~)ymkt4;pq795OxRGHSk<$RwG2YPA?IY4|(e&yc0q4IpwH-$c*5A;;X4Nx;H0y8|O=2 zC5>0@)11WYM^50T3fd0kPu$J&?-An-uQnpksA4ve=jDHoUZ6&|CeXcuyy>ygxd$}! z+^`1~`CLf%GT@7GyO(J)mTt(R`Ky@*VpA6BlHX-ANf;uRbppDEr*6~p9qRPWWi9LK zBU#LPx@DZyUmO=er{gZr688N{%c$-?H03r#9vk;rcYc*&p6YL(2%_=tBoY8tcT%fj zacWRSfBM?Y;Oj{~M)FGlpf28$Nkcp8K@gO?0(`*{1kw*y3gfh8?Jy|sbik@;)HH~d>@kvvdtp7tRoQQ z#YuA1NYL!ocgGsBqre%GK z)2zfp(C7R?E{Xqh1^l2G~H9n7*Q+h9A~{(b~&zfvsizBQU-n4 zx+3jL$e=sdN0RYMD{JvrSK+0oT7VZ#Z^HMY9BDlNWDeZ_kb?DF`2aGcVlhO~=Lz1%h-g_@6 z@)2MvemX%ivDG0N{=B{_w*ObNY-if$hG1`_c_J+>m2hZW~u)lcStZGw`!ZmoA3=31#q6 zZZ9-t*JrGgdS)GrVV^#Is9U#g0+huvfLin&FCnlFbJBPSPVS8otu{wzZB*Q1t5rXu=$|< z_T>QjVqqqIzQ8`>(BJnu>es0x_32oWj-I+euYBf+K-zcw0!N-+ATuq(|K?{HczMy= z+D!4FlYfsGZ;KF}Y!>)_B?L+A(v8m?xpo7TTII^>`|iqEohrq10D6FCH?{Q=zc=Qj zF^97C!6!YRAF5T~*6q%sPT9%h)N=FuhJUlDJcIh+1Xcyf+;$(ef;*~CLC@ns?YU9@ zH0Fl91N;6QH9z~2Qgr_ZN%Z?h*E`0|ZLJRzvX8&4vp?3PPHv|QHm2JGzI{1y-1Bqn zDLr!A7W5G{tdo}=Kv$SV~p2W zO+c#PWzKbjmr!9!>kw)bVt;LabE|_N6-^{@qOcBGINesL&4M8mE;ok9ncBzdV!csp)wq@aE9m7KRqA6{%6N|BsA z4V>SvG}UiAs6Wp9yRao?2?NS)mmCtjx^@6)Wd{EN_DTVr!muhq)URS7h5x!w6bI;{ zUSx%Qo+4^^LUs8il+pOF?W-CKY?j7uK0))N5^dHN6}uV%JdXq9l0Pg1G{euGK=;t0 zL-fTLU*zzo0w@O9wrJ5JTDEMNpe{fZEkNX(Zn{ZCsUlKU6-0glG{zJ*%a<=t4?g&y z&{g&1JU~wTRFNPa>O>-*HiMo)u4Zt3L@DIQr5_KIscu)X{ z09eP38%Gfl5n_DQ_Asv-C+P=(T{(~G*|TRwAK?dlL$bO1@4sIpGDG4qtcPHXx_0eK zD^{$al`B^Y8))bH>#r9+mMK$)@RI`DM~@yA3Cci60MHW=&*R6B7xj@a4dsaV9Xxoj zh*||`3IF66Nn80jRr|hS!v-9H#Z$0e1_+&7!IP3ci zn)$dNb!ut_eLxZM&0;H{)vmFAQ|>WE_?n}IF&LRw=sEtzNlA1z!FfZc-7t>po3hHN z&*v}NH`jTAP!wIxwy3KguU$;3G3rL^0|%146_otFGm9Sez<3-}h&=mgGX3~MDM2ZM zDoYs{eQ;J1rNBSSSb1PHA5;PusQo{taOm2^d84f}yL@XVP5C;RK6#)N)vl}`e*hS> zCIr$`vy)`KGjQY0pp(R_b$EB>Fo2d^EW236p z@rh12PjQ?yQWbB_cZO<&uc})#fAx<5?)PWh5=0|fJFgkKUiE3{9z@jDvnyM?C#?wZ zwHq9z{JwU9%93+7I{7?3@JkH+dDwU%0avdlfG+^z)|IX2Jbcz&u~cz7!RH1$ah34R zT&I3$NvdDY^?BS}$J6CO^#h6kUrz@sAN4v_az5f52kg|>9jE)fK~wFtwQ2>x!AD$^NB=S ziily8oKENSoQwP1&gJyi#ebCO-VmFwt&E{DriSylIJr&;h5K2b<8osT-K`(U`6@L1 zuB3@>FT+s)4Ow4^sZI<>V@GkKy8n`3f3pLzbRd~JHir)%rcXcplwxCJ1$6)tWg&WY zBYZEh1S_$IdU*RYe8yWI5^r zy#4aaFU9Bmi0nng>(HS?g?}G^{IT!{U?`%GTefUTzy0q z)o-+6URKKhyh@*-7at!l#v>{!O1M|4QYE_Mjyq`2o;{-PCr_Rf)?@DF|8wQCmv)W*IdfoDYJnt}h^x`95xzMRVXx#=4K-@SYH67~g9gy7{w zOiYZ3j#gkiyxqu$IPJFt660u_T9@cnyR7Txno3hBVd8{1lMD$eoE`hjD{2SIPm 
zv)q*ed@C~VafuRcZnLaIZ5aBJCb|p5N*4<)>l#6OsDhAxBPU>c`gSW=5_#6)CwQ9^ zlJ!0ZF)e{kbo$Xu!60SFLCzMNelZ8c&E#*rbX?2O-+TN#J@P>^z4t%>&6=M^uYY%C zA0Wa*i_xbXm0PQl>r=@W%hG7dmz>Fz`*7>@1e(M24RvflUwlqHV_YDW=JEXCH*5b| z)+|BcA=d4O@maqwi=yM5V~eu<5O=f=!~|7p3n-(}Ej;P(B{s7+RXT*U^I;1=n(sWlu+-PC4s!7%>{ae|a>-p3F{iBZn>rvf@aznu|q@j^_4?gCXbgJS*q0IApCe?UPl3(_V z?C3|JcQq>r2Jq3$;p?M$d58pDmqYqj3!oR;gxEHf#-BDkxH?+9{>gvN(4xMTbNF3V zC4ksp_Y9-{zn>Jmzv{Z&%#W-UL^nrpK8oxxXdV59wAUv*fU0T|&h3uRpcH(^{~wXB z4<}H^P(K<{)&8?~-B29j#AlCgKBZgbwyZO^r<3K;&YS^Au%?rkO8`Ret5KHjs#cc1 zwQpBe-QSBoZC=@KwWU1Ny9BN3RKrq^g7tB!)a#!k z+I81bp#;igpCo7|r{;3GEP*a8Ll=M}_)}%VGC4)GBobZ8q*s_qaLn8WY^J272*4P2 zk!(!1BkRazOaZ2PD&Sv*ZGd_30Y1S7fIj)dI`jkO@E`gR^b0z<71%QS2tUv^0C9-) z!#qxsZx`)i9)dBD;|L$IEXPGC@E>&L*sD{tBm0hW)Q6bsi$XVlvTZkNmeueLpg#J5 zAGR;X2qOK+i(j&?v?FZ@!p_LsKu+Go{%(frjj0ucQ8*tq*-zzj~`@C)eROc+=?Tlj-(8-Uj~O4oyl@&G6#1ZJ+UhsZu#6 zSg&r~l!s1UpiC1+q)SM4r6?VKenTG(bvZU>OHG9DUIxcg73vVKgN%um|J$tE6$rw zv~!-X4Oe0zlSsE`_1Gaf(<))a;QI*^1F^+{lOU3)4P>5(oNdn`;*Z)Mo@o|8r!XiaxKs}a7t znC_!u8PCo;I`6}nLZpICg1Ie6q}|-@hdbD(BFWtd^l9Wn({EueKuzMn+*vBpE~pU* z;Fm9U;Hr+hwtCqTbb|+U4)FH~(?8hBJ*@-jtF6ftpKg571zh9FE}nn=f(WvK3?AFb zJO3n56jM^TlL@U#QI*oxdmy~XySc7EeY{x@PP?;Pv_AJE5YBWS4K)-A2RM-*(?1&o zpTC<0pWPRUESs!|)X)w*;J#piK=I)&=bU(1$xeOBVR9t*$y@iiuTaqgmbON#+ltTc_qD*qLLKo}cH|F6_ZUpV>1npo9&<Per`CYAyCq7AfV2BIAnJo~rno36I13*Y26fInE4ewh2B zv!yFTJ+%$!U>W7org^>24$9yUwhhKlj+0JT*QQRdsO;(;0eiIN>g`j3dfu|0e#qAD za6AY-&dJG&nZCWjf%NJZ#!op_CwCn@M-6H??bc#`+3@nkL_)Y>*BimDi9?wl;w9(J z3+(8kdr`jYkP#S087L{ta-|}C&3;_2<#J`Nqsry17rG65&QTo()OEBM;8vD0{a%0S z%-}nIgS&duGaskWPNss;NjSIj{v!d@tBvD^w{|$u*xugs*>6T*a^v1})SRQo?|MCn zR&%ttJGs(eFZ=^gt$9YwaUHzJ4>BL`A`Ks}3i{TwNxlX{NX}-KUzww~Tu?E(p}7}% zD-qs^Z2fJ6aa_%HFJ<|!J(-k{a)E*r&~3-jEc))Rbeg{|oiE6jV%<>}^m0Sm>!W|S zAGPimpwlo5BKxKM-{KU)&sMC}4XQmGadgQ&R?z4)e?s2>bw}bAy3X02p|V_X(}q;@ zbULP^9|4F~C7qwv-db#Yk)p28&-w4xr_%n@)@|W-926%uAF~G@6g3Lu?fcSzAR5$M zAGxIJs`JSCIlae+SkE14GU zw!eO<`bG2l5g64hfL0uIj$tLHVR-N65Ykby=q&6!ZNI+&WL`MBJdOMDEbTq((5oC& zoIYav4a$24w4lu{5k6EdzTe)zEmnP8Z#a$~zb&HH)t% z!ndF)9qN?!qAxfC*IV;Ni}VkP+CJKqAQ!dMX$%PWiwK|~o9N97oU{(0Z$Aco&wv&{ zk^38#q5fRAK^dlDGu^u@YKB?6$mc#)0;qP7{W14I!?JWkWq+Eu<`l&-;+}zA>NXOnT{G zW7PWSs->w*NWpz&VdKg*TkI()rVfBw2NVI|-vw9(AhS-LIs)7qG-wdjuU}t8$O7bp z9;OhrK9)m6Luu;Nse<+Z%PM!Os*8Db8MK*~(o#xfumjd^m&sWr}j&;Zm zM=~$c*^AVpbqVU(+WPkR_cd9xP8e}$_M9-VP8)6rjw=rSRWE~QKK1dJ~O*#kPoGC_7N+I>9C3R4YZ>&_b5b{Agtx2b}@*0b{;O~s$x0F&*-aGNmD3k z9d2yqML#j<{M}z^v|EW*4dQ6zOO>0Nc~fX9tAAMx;QhJF`XHohGs}L+FV(#(ULW^n zkM|9*fRt(AK3~S4cU^|VXZ8L}!p|b^fNxaDpj9vb^K_8lS07qHyrs~DZK`wxHlWE$=K<>i5snmz->7;tu zlC<*9FagXWlD2d%Y)gAX8rTC3ix2Q;iq2aASw%ZtxG%*P@NH7wn`*MJovZrLwC=7y zcdimtl4kY_6>aQ28%aCQYK}Rr2UkZSP6bc3ElYu3#`AqDgKzI{jB0p+dB3awH-NMy_6Tty|EGvUm8hKsm_aJ>vBHSB-r)K2Wdl24<}~PjlUhE z5)v%njoGn+FFn~bM5@?M>y`GV#l5P~n>%Ca=B39;yr0eGb-~tnAzFcm>4D*aG_I!g zg|VHRw^b`mXEM&yws-}|wo^8h+wu3R(Y=j=yh#_~8=dalA53OFS}bJ(e9!WMvnw&3 znv~YRyob@lshRY%DH&TMrhzB|d_5X+w@UcTUUKgOXalDzl8FI$lzRW8@#Dt@ zs0dvEW0fmc7NBU`wrxc@0AGNl(2t0S5cW_8pbP*XY(zt$)a0l)&BM4jQohYNpE z4#5<@A(u!FHhvN+7c9>x%UVv1yOa-5)tXxIoIKk)WK7vqF-%;N_e zQZSEgcITaU8p>fCz$N&9A*hG(L%G?8v}K+`5A+M*2!?+U*aLW-8<7Aswh{bA5<2(+ zu3Y@&{)9ea`%B4{Tp-Z;YEdMrM2JGWG@30q6@q zP*3g`*jKR+!Z!8|_$K#n)ke|$wKD?NCzETZZ~5Mn2Ar%dKm+Q4zZZ#?ri{1V>>|?Y z^)FrDyc~%-Pg^*`Qw5W6Y*&(=elL|4t<9p|9gGkBi1MBIs^c^Ft}RMXQw<1>By!7{ zen1s}zW2~Yueyuyy>CeFZidx13*&q{rcThwdIo%N>!k-mjxwb~Snzf9%IoWAsc%j4r+!Z)55IpMa_9vB z1drvi6WP4SuKE+tNytVpy~Ec`@6bR=R5tUMRC@hKBb9^MP6F@aUoKAZ=YnUe%#1lZ&@22=rzm(oTR*TK|MrQS1N1mOR`B%Z6mky z)yfsFOGFKhnAb(5uG*V%S-{@hj^Cx~f4C}{-XCeb!2?ywJIj;l%|DXJpQ!{Mzafxr 
zZ|BeZg!5(@ZG#IM3EVtQ;)d(hx0_PwtrdxMieK6kN$y7A-{>qY;0r627ziHK+#XbQ z*U~F1;+8&SmLOH{K= zNekGE@%i(3TE2k2J*xOp3kFrqlEjgv%Z?ehr$675tq{wy72aMcs}^xp*PQQ41wdXO77>T?wDlD^?|`|?QIoE_!7D=w364ELl3r%o0> zq|5p!9bYeq9^~j1rmO%qEl$@u|!%JnM8>m(mUK(L4R&t!M3E_TuIL- zygd&9v$Tn6kcrPLhC%C%acM3AU$wrg`8cMzSamjq=^?CIO@e$a(Z$gEJI2{(wbteY zYfcPx{k1f|di83NNDQDB08u2;diddo1>l8#;+YI!8w7ycYp=aVM~@yAz!*TWx8HtS zBtv`k)mH`33P2A@(I!loK)?L*3(cQDKZlR^_S)tA)3smsv8e)j$M z-&15{q}VoHyLJ_0jl^`=X7B?*DiY+488b#ym$tDEe*F9Izrqgy`sizuCQZbCwtV?= z`uXRdbGEPCj__65Q2jB_W1q!7gD7i&+OUlm7~{u}7s>RNELkEreFFjlL>Uqf<|Rtn z+-}iw;T(a&^)z2N$0*0E&dv41wT1!T`)~CjaOs-m`=yz5B+BvN9H;?{rT)(=GYx<* zB5|*8Q<7%?nqdKadodVXy;3nc9C?8zYZ8pX=Hj)PWCn8Y7@!9;uM~%G?*|g;OuX^N zQ;tW@)N+$!)aUa&t@&8bnU-V2G}BeH9RcX3@mG$`HQ;#`M?fEszCizbf+Gtx+wi5& zQ%ELtGc_f+c@Hz7e`^opgI8USXdU0rhu;6$^~(UWy~4Cyy+UzLyjGlP4mi4!0oq3f z`!hf91$ugRGWqlVi***UY)^;A3tuM-&=--3vpK;VxOmO=ac{Qoq&|dmKS$ZS6VN4$ z?CRv&GV3QYfb32(LPPtZ^BJ}bSB{fm0>WqwEuNt8dlnRuIN%a0j*ZVS83P$tEsO)X%Hj)YRUmyxP2!dd;dDUw%nje-tp~0J!+vKD(Os3F(_* zs?pIs$jLejcBE6-!bEz$uk#c6GfC%Z;^YjfDVgqUmH*uLeQW!X zugxdjNgO@7>S*q+Q=nkDueEcZRr?FT_k&GEJig62+F2({j~KyNosPRQoMxVFtB;^w zcqo~v1o&E~oQv*ltiOk{qb_jQzTJA^SL0_V)o^t2S8yO1ExEZAPC3kheA-Mej~jlVt? zZ?2lF0(!AawKl+y#!?iz~WXyKdJ9o4GMY+R8>h8vIFeaRWr zPwFejxYocmT)1!FJ^=^<^nw5o1-Pb)1O=f3kaYk4{bDshQUIxlg8b>HpK=n6A-WPa zwr}4q%B3wtv9@mAT7YXSR;&=kVPTxEl=b`o06+jqL_t(6vVVW6BBm0ggN;Ln4$;De z3v<>(f7GZ^Im_w{08;>q>({Rr#fVBpv@W7SfBp4W0eWHul5zpq1Aw@A@!}j4O`A3q zK(=fjD`em0yj+KJIiF)7moumjI{>Ny?jqV60=k2qJ$nkE6@V=SeE`*ioEI}L{t&4O z`@44S65Cg*cJADnx^(Fx7u`+)x*tA#m=-KpAans5BNAHn-P3V6c<`XujuO-cfDKBG z0|yQe096P8RZvr?BF7)?ojJpaM|nKtI3cmx=+UDI`|$6-|DKbu4&&6XUq1nA!K%J`2 za=9MU=A4vl^~a?sIgK-O@~5WY1TL%jR2;gA4A@->!A)?nVs6@YbHk$2U3k)mTH(cP z`Fn>jT>}6|vuxt%>*vS%QPW!1!(}DYFnqrpr18WNz*g`Lwq|^tCOCXco1!=GALL65 z8JIuD(Y?75KYo_^`sq?s(#*MMavjmEI_by}&I_jLAI7ErpW8F(weMX=1KiWskML6k z3EaMa+IR!1uJ>eqOiy3JPbULjh^MWGU4MSpmy^0Z!^z|j0i4L6qXUpeRCNzjn zGs0Y{6xh0tSEo=VPMmjV#{l~4Kn6`>u=h-|b6>5-$^TkMcu^asX$aQ50PxhYmP{S- zLch`knuve-w=en}os2(Eu|<>)o(>`Xb1Z{CUY|^NwQ?l_f6%MYf}107tSi`B-T zT^2{tDd%Z&=O9}UN!5Rk6Roug=i|7N6#xSTg_NNU?ZJqp?4yT4cG8^S+_eLI!np62dGSi%?1k8eS+0vtz4)_kM6F~LK>R3f|k5V(C89JA6!Nj+?jog4xD_o-C+>r@)8WjQgkiX<3?7-}oeG^ ziw@5ZzW^rRR54KeU@wuQT{py}2~J*c{T^U&b8liMftR-=oHNOmxb&O^aMK!Emkxof&<{YB|Dn-5OeiDygiDx^{i#E)uF5(&1+M|PzFkeBDu5? zkRTYQ028Gka#4b8vP_*qUkU(USs(KNQXw+Z6jRui02%D!2VE)9QQK4PLq}bQWoaAp zX1mZ;*P|V|44@Jb$`JSjUDY<`p#xFNMS=3CuEV_9U%UDkHF+FBN392c)G@(&h`L?i zn@-1}Y5kXeVu~@4AiiqTTFY4i|78CF`bvQ<{8ayCTT(|iMIAZ5SdV4YQ|qeR3p(mL zEStwbj)(LKIuP_#_7}@$J18@Yd0eDz^b_`B4`rU%GTSa%zPd)hd3L(G{J!ko26ALx zv)b01;zK8J05S&)e@0z0Kt1i@#78u)KWF%45TzzZJK7~zLuaC6FAARD#tNv|xfugg zY;(?%40`TPBe2%HLrJQ^;H6H^#9yF2oJ6coNBs@Ixz5q-2xN14K`;OMmivd|{Pv-H z3oBrRHbBWRkwJ8wG-9C8-`BWprQVK%=V?$^FY43I%N9_NXRg}U=A_ZI@x})oAO4K? 
zr91s;^fbrYPifQL)YqH$#ajA)xT}lx0LdFhxIXlw`;(lnxr!3bOM*k_IuFXTIJ@@6o-T~Bs!QwdqT zd6c6s0j$3g{m5;bQx_!CN~Wzi9K*>S8JMjfR)QK-EJ-bRJ~YsJUCOO}=vU?>S@`v+ zV>}Py1;D99=BejIeNjAJcO;X(WKQ71EO;>zq&a7&<}np}3V#N#6P>*&OXF!MG;p-e*_uKx{SimQn&^R6=-A23wTZOxWCqRXRhId1^*8DVVzOxZs(8WsyYHcJ zt*PB-9Fb2rr#5nt+$}eB4zi)G3}&BTKI6(7K4a*Fa-um;1rH`^3Qw%3^$NRQGfjl= zHr^)DoFLEe1f7SDjA@|1UPLo@Ex3>e)YR{H=BB_^n_u$P^Ir$NMUNOWl{9=O_j*k#I0)9&En|W!%0~J{0_*Da6iRkv?4N<{y3RJfATaw z`&jH##h+$(4WqW@IUBtPHzJ!kq1ruvonm^9i}d1_7+MvTMzcDUr}8#Hbc+yQTG6{I zy}b1-eeiF5p8Id^V}35r?Od6^>3~SbYSpR9%E1{7 z+YqcrU!*?#0F478d!d75W$+PtjT$u){$aU#_3EMwytlP$*A{+22mGwkmh=TvwGFjA zH}mGBS8WIFA|V?hrBRMCK_W3EltX{u8^#F!dMdDkA1E`hZICPtV-K!c*z;5{j_|K; z-MTq`U>kTMa@^ntwj=f(^aFliKf}uf_=9Z#pP++2>qL$L`hx9?WhB#sE!1h;xUty2 zpmBhI60p_TGMCA=aU39u9d#pk|HOU{4rJ^*NJxmtX32r9+K|T+`dK7b{|LB7eXjog zrTIJ*E!m-`#g2KCy5>!=|jVB@XFBxEyLXdjStT{hX4#i{@9R3D&UI< z&ZZ19|F}GhCXePQMFt4ZGMGDOaVCAeD1(Of^r9vTK{GjhbM+(m0gbC5MR@V zq?*Acsd7+p0`RJPA3^%`^DyD`eKWHgrTLq?1Hr%N!RjeCDP zYpiZsZ(PYs?$%o>6y5@FzL%^YY<wU0=#H?BL#iVZscgH zqkoN-9uQ~1ARD?Ioliw&mo@?toa)%Iqe#Go#9RPX0dU#zlLB0O^wCE}!ZoxGKn!3Y zK)fkarU+J;(g zH;=Y+QwMypQ>RW9;{@;#U?Y44XD^b?LEt0$=c&L4*a9am099-QEMt3M%&}gI&R^+2 z#tHt-nl(#|5&XbQ0=Zt=&`qHu+X0malJ)^Og-vWz*unM%=!@~kJ_9gbC-5EH65A2r zHTG4E5%xcD>Q-X_S=xpTP)116^-r$D6zyW1!AFg8fQ0!N^_l18`2Q;PC+|NXam(}Tl3>zgaP|IglefXPuDeZS?` z1cPld29YF^5JBV&CWA2;u*vpk11~vaa(ofV~qL2IX{0 zfP@TR|7y2pd(zJC?a9_1)z33GJ>At6{yn-o{jawAKRrkN-A#O{i)5SVvssr~Ec<_x zqbGjnj<{l`yW`A7eOZi3R?;(X&31ckmHwD)QumjBJ&m71|M%BfuE(W6_dlH4`7#>c zx$At%g!^BZw;x4M`tw709_H&YPxcv~OXZX>$-?06!E@a3Ix=IEI=-mkfV$+GTApuv z(Vrqq#{QT7P{*9|3Zb4CKKRCcG_rO=?!95f|Dg}{)1Ni_JNNpa#D9vDLi6BxofQ|5 zL&%b*u3O%y?-L2S_J?-of33ukk*&nUB-`C^)nJ2!wZoYJ;$^nySyMtdC<0kt*a<)W!dgLs3*?W`S z`J1HeJuz&$yWpM59~|!T2fh`xI|enm!#geJ+WSMvsv(=_uZ)}Jp8j%%`^}2k*|K>)|8w*VcjpO1ECF>@B?-)A8t>|m>E}D_aP5Fl}Jn?$s8rkn7zIGctIWzkZz&;#yDEOKX_ahm!L*@@n9OS zekZkgf9^|Bgh52bC|*`HXNk){34r*qvIx6E>vAMM;;+s3nQ?tJPS_o?5X zz0RKDZa#HUxAl4pxDP&`>rTCCmRsLv>xLw75=TmTyxPC=i~EPZ{6lxIZ*gB+_ZnZ~ zVT%7Ffwg&qz`H}fb-Qg;8%aj3?aLo;>?7)V8NM9YGSQRIiKkW7< zH)7(v$!HGaPxiCw_m1_YGzNMyJp4~&a$ReGe)5B_-EQkF?Dp(Z+aK9R`oq0m`1ou0 zyg$h9-H~%*2LV0jiQnh^IG;$~)E|Jin--bfVX5L@1^C7A%zrewj8CVwc8FVz z|Eu{d>)})9#`ZMN`JGn#i94uE{zryCo{;{XB2Sl%w=4o#@p?uH$fiPT-x$CF#WG|8aJbKG=KJpO#4_|Kb2(09i9vItkdJ0%=yk^Zd%n%rUw z{>c4#!}x#V-x&RsJM`tTzO+Ki{!Y}*+go+91>Nu0sFb*P!_H%OP%5YpY< z(w)E8bMLw5-0!+)t>0h6z=Ac*JMZV&&wlo^_ma-{$AVZm+Bx+t9{-{mcGCW!2A}LU z56=8pCJ5stJB zO~!c?(Lt5hRo-paJOwHE3{|NlzIfZ-2C*Q}gI7eF!TJDyGNrJHH9h;^90wVJe3!NzBw+I$b zXR70*MycE7fn5TAHbdw7a{}$B#FLK| z*|uufq0tCr5ZwynT6~*bJv+s>*LL)G3*nAJrv0Q13H#NX7#AjD78^ubclZ+DpX!*# z^=N6TO(2irQ@Q^W>eH*rekNr$f2S_mYB9Z@m=_dE$G|Jl!%sn=v>Y_-`B_l=lH93( z^SnyF^Y!0VzIUBEJ_-lJP+E_>NurzOa{02EW_Ez_Ox)$VH!6ALz9X93gX{;NQ>@mL zK6*u-m>45DeAF+^csL+Ex*>+RE+kadsM5 zs6q*+8?xyR)2rtZ64u3^A=#C=0DHQG^X+qC)Yrg2KHI-vJI8HDTsB;~$J*Hb`vzxZncVDR@fYR9N48zRx$(mT%mAD08PVZciE?`cubm#LNUO6n0jt zTDk;sH0iho>>90Fyygb+t~3i*Qibdn)E+KJVysg(phPTFsrNn6Efr}X&pI_a_1&X~ zozcXDh(^YB*Jd_}%3?u6R<7?y8|CFy*6AC)mvyabE}69>C{cII(hqBt=MQ_bpA!#z z-O|PIWbU`(K^^aY9QM_Y}$FgsTaH|ISt8?&GGB|=p^sd zf}B!qrqOt(?^!&=bx=vpHb2gbt#5I1A|g=HG|L>Kmn)Rj>iGREVduux;*o^0$m5Xy z^s1aQET(urrx31+5StQ8dP zN!y>1LFdkiI2&`LG2kzfHJh{v1o;-en_q!zA6~Sux4poqJ6Ah%(gR;@Ve~w%oi6j~ z)jRQf&&T(kQ~f-hJ1vr6IPAVH{^Pcoyh46w6Q3L(_3~O-?`G%B65l@1Iy&9|va^Rx zU`MUvmbdlku=w4~5y6g<9;28#=(Q%e`cRoyC&D{jm8J2rH=P`my#y6)V=4Yo0zqM2J2ax5H` zjC6+bS~=QF?#954mT;bcjIqtS4n2}R*7@R(TZSFRby z#M6trzFo7~3|v!M=~kqZ-J5oMcv6=PP2ZPOckk5}&f7}ZDV>Y#rEAteDw9dsxj5u{ z$LZLv+25e7Iz&@$=yRd)+IzZ)TvB*jE}2ud(h}kcX-7oA)RLSYnZmzt3~h|I0#%O} 
fc6c32eaa1c17120b5d80b42a7d2b7d94e929636 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Fri, 15 Jul 2022 21:35:42 +0800 Subject: [PATCH 16/21] delete annotating code Signed-off-by: zhaohu xing <920232796@qq.com> --- flagai/model/vision/layers/weight_init.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flagai/model/vision/layers/weight_init.py b/flagai/model/vision/layers/weight_init.py index 305a2fd0..24c0fa7c 100755 --- a/flagai/model/vision/layers/weight_init.py +++ b/flagai/model/vision/layers/weight_init.py @@ -42,7 +42,6 @@ def norm_cdf(x): def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` From cd45e5c736a327e8378ad4d3f49cdd607fde983c Mon Sep 17 00:00:00 2001 From: zhaohu xing <32668889+920232796@users.noreply.github.com> Date: Fri, 15 Jul 2022 21:59:11 +0800 Subject: [PATCH 17/21] Vit xzh (#25) * add vit and examples * vit and examples * Update base_model.py remove unused glob * Update vit.py remove data statis * modify readme.md Signed-off-by: zhaohu xing <920232796@qq.com> * modify readme.md Signed-off-by: zhaohu xing <920232796@qq.com> * delete annotating code Signed-off-by: zhaohu xing <920232796@qq.com> Co-authored-by: Zac Liu --- README.md | 3 +- README_zh.md | 2 +- examples/vit_cifar100/README.md | 163 ++++++ examples/vit_cifar100/deepspeed.json | 48 ++ examples/vit_cifar100/hostfile | 1 + examples/vit_cifar100/train_DDP.py | 86 +++ examples/vit_cifar100/train_deepspeed.py | 87 +++ examples/vit_cifar100/train_single_gpu.py | 85 +++ examples/vit_cifar100/validate.py | 76 +++ flagai/auto_model/auto_loader.py | 115 ++-- flagai/launch.py | 12 +- flagai/model/base_model.py | 29 +- flagai/model/vision/layers/__init__.py | 42 ++ flagai/model/vision/layers/activations.py | 145 +++++ flagai/model/vision/layers/activations_jit.py | 90 ++++ flagai/model/vision/layers/activations_me.py | 218 ++++++++ .../vision/layers/adaptive_avgmax_pool.py | 118 +++++ .../model/vision/layers/attention_pool2d.py | 131 +++++ flagai/model/vision/layers/blur_pool.py | 42 ++ flagai/model/vision/layers/bottleneck_attn.py | 157 ++++++ flagai/model/vision/layers/cbam.py | 112 ++++ flagai/model/vision/layers/classifier.py | 56 ++ flagai/model/vision/layers/cond_conv2d.py | 123 +++++ flagai/model/vision/layers/config.py | 115 ++++ flagai/model/vision/layers/conv2d_same.py | 42 ++ flagai/model/vision/layers/conv_bn_act.py | 73 +++ flagai/model/vision/layers/create_act.py | 148 ++++++ flagai/model/vision/layers/create_attn.py | 89 ++++ flagai/model/vision/layers/create_conv2d.py | 36 ++ flagai/model/vision/layers/create_norm_act.py | 88 ++++ flagai/model/vision/layers/drop.py | 166 ++++++ flagai/model/vision/layers/eca.py | 145 +++++ flagai/model/vision/layers/evo_norm.py | 350 ++++++++++++ .../vision/layers/filter_response_norm.py | 68 +++ flagai/model/vision/layers/gather_excite.py | 90 ++++ flagai/model/vision/layers/global_context.py | 67 +++ flagai/model/vision/layers/halo_attn.py | 233 ++++++++ flagai/model/vision/layers/helpers.py | 31 ++ flagai/model/vision/layers/inplace_abn.py | 87 +++ flagai/model/vision/layers/lambda_layer.py | 133 +++++ flagai/model/vision/layers/linear.py | 19 + flagai/model/vision/layers/median_pool.py | 49 ++ flagai/model/vision/layers/mixed_conv2d.py | 51 ++ flagai/model/vision/layers/ml_decoder.py | 156 
++++++ flagai/model/vision/layers/mlp.py | 126 +++++ flagai/model/vision/layers/non_local_attn.py | 145 +++++ flagai/model/vision/layers/norm.py | 24 + flagai/model/vision/layers/norm_act.py | 151 ++++++ flagai/model/vision/layers/padding.py | 56 ++ flagai/model/vision/layers/patch_embed.py | 40 ++ flagai/model/vision/layers/pool2d_same.py | 73 +++ flagai/model/vision/layers/pos_embed.py | 207 ++++++++ .../model/vision/layers/selective_kernel.py | 119 +++++ flagai/model/vision/layers/separable_conv.py | 76 +++ flagai/model/vision/layers/space_to_depth.py | 53 ++ flagai/model/vision/layers/split_attn.py | 84 +++ flagai/model/vision/layers/split_batchnorm.py | 75 +++ flagai/model/vision/layers/squeeze_excite.py | 74 +++ flagai/model/vision/layers/std_conv.py | 133 +++++ flagai/model/vision/layers/test_time_pool.py | 52 ++ flagai/model/vision/layers/trace_utils.py | 13 + flagai/model/vision/layers/weight_init.py | 88 ++++ flagai/model/vision/vit.py | 496 ++++++++++++++++++ flagai/trainer.py | 199 ++++++- flagai/utils.py | 4 +- flagai_wechat.png | Bin 68218 -> 56710 bytes setup.py | 2 +- 67 files changed, 6397 insertions(+), 70 deletions(-) create mode 100644 examples/vit_cifar100/README.md create mode 100644 examples/vit_cifar100/deepspeed.json create mode 100644 examples/vit_cifar100/hostfile create mode 100644 examples/vit_cifar100/train_DDP.py create mode 100644 examples/vit_cifar100/train_deepspeed.py create mode 100644 examples/vit_cifar100/train_single_gpu.py create mode 100644 examples/vit_cifar100/validate.py create mode 100755 flagai/model/vision/layers/__init__.py create mode 100755 flagai/model/vision/layers/activations.py create mode 100755 flagai/model/vision/layers/activations_jit.py create mode 100755 flagai/model/vision/layers/activations_me.py create mode 100755 flagai/model/vision/layers/adaptive_avgmax_pool.py create mode 100755 flagai/model/vision/layers/attention_pool2d.py create mode 100755 flagai/model/vision/layers/blur_pool.py create mode 100755 flagai/model/vision/layers/bottleneck_attn.py create mode 100755 flagai/model/vision/layers/cbam.py create mode 100755 flagai/model/vision/layers/classifier.py create mode 100755 flagai/model/vision/layers/cond_conv2d.py create mode 100755 flagai/model/vision/layers/config.py create mode 100755 flagai/model/vision/layers/conv2d_same.py create mode 100755 flagai/model/vision/layers/conv_bn_act.py create mode 100755 flagai/model/vision/layers/create_act.py create mode 100755 flagai/model/vision/layers/create_attn.py create mode 100755 flagai/model/vision/layers/create_conv2d.py create mode 100755 flagai/model/vision/layers/create_norm_act.py create mode 100755 flagai/model/vision/layers/drop.py create mode 100755 flagai/model/vision/layers/eca.py create mode 100755 flagai/model/vision/layers/evo_norm.py create mode 100755 flagai/model/vision/layers/filter_response_norm.py create mode 100755 flagai/model/vision/layers/gather_excite.py create mode 100755 flagai/model/vision/layers/global_context.py create mode 100755 flagai/model/vision/layers/halo_attn.py create mode 100755 flagai/model/vision/layers/helpers.py create mode 100755 flagai/model/vision/layers/inplace_abn.py create mode 100755 flagai/model/vision/layers/lambda_layer.py create mode 100755 flagai/model/vision/layers/linear.py create mode 100755 flagai/model/vision/layers/median_pool.py create mode 100755 flagai/model/vision/layers/mixed_conv2d.py create mode 100755 flagai/model/vision/layers/ml_decoder.py create mode 100755 flagai/model/vision/layers/mlp.py create mode 
100755 flagai/model/vision/layers/non_local_attn.py create mode 100755 flagai/model/vision/layers/norm.py create mode 100755 flagai/model/vision/layers/norm_act.py create mode 100755 flagai/model/vision/layers/padding.py create mode 100755 flagai/model/vision/layers/patch_embed.py create mode 100755 flagai/model/vision/layers/pool2d_same.py create mode 100755 flagai/model/vision/layers/pos_embed.py create mode 100755 flagai/model/vision/layers/selective_kernel.py create mode 100755 flagai/model/vision/layers/separable_conv.py create mode 100755 flagai/model/vision/layers/space_to_depth.py create mode 100755 flagai/model/vision/layers/split_attn.py create mode 100755 flagai/model/vision/layers/split_batchnorm.py create mode 100755 flagai/model/vision/layers/squeeze_excite.py create mode 100755 flagai/model/vision/layers/std_conv.py create mode 100755 flagai/model/vision/layers/test_time_pool.py create mode 100755 flagai/model/vision/layers/trace_utils.py create mode 100755 flagai/model/vision/layers/weight_init.py create mode 100644 flagai/model/vision/vit.py diff --git a/README.md b/README.md index 1d2990b0..37b40dac 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ FlagAI (Fast LArge-scale General AI models) is an fast, easy-to-use and extensib * FlagAI is backed by the three most popular data/model parallel libraries — [PyTorch](https://pytorch.org/)/[Deepspeed](https://www.deepspeed.ai/)/[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) — with seamless integration between them. Users can parallel their training/testing process with less than ten lines of code. -The code is partially based on [GLM](https://github.com/THUDM/GLM), [Transformers](https://github.com/huggingface/transformers) and [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM). +The code is partially based on [GLM](https://github.com/THUDM/GLM), [Transformers](https://github.com/huggingface/transformers), [timm](https://github.com/rwightman/pytorch-image-models) and [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM). @@ -156,7 +156,6 @@ start with our [contributor guidelines](CONTRIBUTING.md) and then check these [open issues](https://github.com/BAAI-Open/FlagAI/issues) for specific tasks. ## Contact us -Scan wechat QR code diff --git a/README_zh.md b/README_zh.md index 88def64d..e8dc5a3f 100644 --- a/README_zh.md +++ b/README_zh.md @@ -18,7 +18,7 @@ * 飞智由三个最流行的数据/模型并行库([PyTorch](https://pytorch.org/)/[Deepspeed](https://www.deepspeed.ai/)/[Megatron-LM](https://github.com/NVIDIA/Megatron-LM))提供支持,它们之间实现了无缝集成。 你可以用不到十行代码来并行你的训练/测试过程。 -本项目的部分代码基于[GLM](https://github.com/THUDM/GLM),[Transformers](https://github.com/huggingface/transformers) 和 [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM). +本项目的部分代码基于 [GLM](https://github.com/THUDM/GLM),[Transformers](https://github.com/huggingface/transformers),[timm](https://github.com/rwightman/pytorch-image-models) 和 [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM). diff --git a/examples/vit_cifar100/README.md b/examples/vit_cifar100/README.md new file mode 100644 index 00000000..1cd35edd --- /dev/null +++ b/examples/vit_cifar100/README.md @@ -0,0 +1,163 @@ +# Vit for classification with cifar100 dataset + +Vision Transformer(Vit) is becoming increasingly popular in the field of +compute vision(CV). More and more tasks are using Vit to achieve the SOTA. + +The paper is in https://arxiv.org/pdf/2010.11929.pdf. 
+ +Code is in https://github.com/google-research/vision_transformer. + +## How to use +We can easily use the Vit to finetune cifar100 dataset. +### Training +```python +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +trainer = Trainer( + env_type="pytorch", + experiment_name="vit-cifar100", + batch_size=64, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100", + save_interval=1000, + num_checkpoints=1, +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + + +if __name__ == '__main__': + loader = AutoLoader(task_name="backbone", + model_name="Vit-base-p16", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + + train_dataset, val_dataset = build_cifar() + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) +``` + +### Validation +If you have trained a model, you can valite it again by following code. 
+```python +import torch +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.auto_model.auto_loader import AutoLoader +import os +from tqdm import tqdm + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def build_cifar(): + + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + + model_save_dir = "./checkpoints_vit_cifar100" + print(f"loadding model in :{model_save_dir}") + loader = AutoLoader(task_name="backbone", + model_name="Vit-base-p16", + num_classes=100) + + model = loader.get_model() + + model.load_state_dict(torch.load(os.path.join(model_save_dir, "38000", "pytorch_model.bin"), map_location=device)["module"]) + print(f"model load success.......") + model.to(device) + + val_dataset = build_cifar() + + val_dataloader = DataLoader(val_dataset, + batch_size=1, + shuffle=False, + collate_fn=collate_fn) + index = 0 + accuracy = 0.0 + for data in tqdm(val_dataloader, total=len(val_dataloader)): + index += 1 + data = {k: v.to(device) for k, v in data.items()} + labels = data["labels"] + pred = model(**data)["logits"] + acc = validate(pred, labels) + accuracy += acc + + print(f"accuracy is {accuracy / index}") +``` diff --git a/examples/vit_cifar100/deepspeed.json b/examples/vit_cifar100/deepspeed.json new file mode 100644 index 00000000..f2339ca3 --- /dev/null +++ b/examples/vit_cifar100/deepspeed.json @@ -0,0 +1,48 @@ +{ + "train_micro_batch_size_per_gpu": 64, + "gradient_accumulation_steps": 1, + "steps_per_print": 100, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 2, + "contiguous_gradients": false, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 5e7, + "allgather_bucket_size": 5e7, + "cpu_offload": true + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": 1e-5, + "warmup_num_steps": 2000 + } + }, + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "Adam", + "params": { + "lr": 1e-5, + "weight_decay": 0.1, + "betas": [ + 0.9, + 0.98 + ], + "eps": 1e-6 + } + }, + "activation_checkpointing": { + "partition_activations": true, + "contiguous_memory_optimization": false + }, + "wall_clock_breakdown": false + } diff --git a/examples/vit_cifar100/hostfile b/examples/vit_cifar100/hostfile new file mode 100644 index 00000000..51356577 --- /dev/null +++ b/examples/vit_cifar100/hostfile @@ -0,0 +1 @@ +127.0.0.1 slots=2 \ No newline at end of file diff --git a/examples/vit_cifar100/train_DDP.py b/examples/vit_cifar100/train_DDP.py new file mode 100644 index 00000000..06f5cd1a --- /dev/null +++ b/examples/vit_cifar100/train_DDP.py @@ -0,0 +1,86 @@ +import torch +from 
torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "pytorchDDP" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-8gpu", + batch_size=150, + num_gpus=8, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_8gpu", + save_interval=1000, + num_checkpoints=1, + hostfile="./hostfile", +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/examples/vit_cifar100/train_deepspeed.py b/examples/vit_cifar100/train_deepspeed.py new file mode 100644 index 00000000..27d46628 --- /dev/null +++ b/examples/vit_cifar100/train_deepspeed.py @@ -0,0 +1,87 @@ +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "deepspeed" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-deepspeed", + batch_size=150, + num_gpus=8, + fp16=True, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_deepspeed", + save_interval=1000, + num_checkpoints=1, + hostfile="./hostfile", +) + +def build_cifar(): + transform_train = transforms.Compose([ + 
transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/examples/vit_cifar100/train_single_gpu.py b/examples/vit_cifar100/train_single_gpu.py new file mode 100644 index 00000000..ef7e1356 --- /dev/null +++ b/examples/vit_cifar100/train_single_gpu.py @@ -0,0 +1,85 @@ +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "pytorch" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-single_gpu", + batch_size=150, + num_gpus=1, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_single_gpu", + save_interval=1000, + num_checkpoints=1, +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, 
"labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/examples/vit_cifar100/validate.py b/examples/vit_cifar100/validate.py new file mode 100644 index 00000000..e52eb113 --- /dev/null +++ b/examples/vit_cifar100/validate.py @@ -0,0 +1,76 @@ +import torch +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.auto_model.auto_loader import AutoLoader +import os +from tqdm import tqdm + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def build_cifar(): + + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + + model_save_dir = "./checkpoints_vit_cifar100" + print(f"loadding model in :{model_save_dir}") + loader = AutoLoader(task_name="backbone", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + + model.load_state_dict(torch.load(os.path.join(model_save_dir, "38000", "pytorch_model.bin"), map_location=device)["module"]) + print(f"model load success.......") + model.to(device) + + val_dataset = build_cifar() + + val_dataloader = DataLoader(val_dataset, + batch_size=1, + shuffle=False, + collate_fn=collate_fn) + index = 0 + accuracy = 0.0 + for data in tqdm(val_dataloader, total=len(val_dataloader)): + index += 1 + data = {k: v.to(device) for k, v in data.items()} + labels = data["labels"] + pred = model(**data)["logits"] + acc = validate(pred, labels) + accuracy += acc + + print(f"accuracy is {accuracy / index}") + + + + + + + + + + diff --git a/flagai/auto_model/auto_loader.py b/flagai/auto_model/auto_loader.py index 37d7ca90..22e2273b 100644 --- a/flagai/auto_model/auto_loader.py +++ b/flagai/auto_model/auto_loader.py @@ -54,26 +54,37 @@ def __getattr__(self, name): "glm_title-generation": ["flagai.model.glm_model", "GLMForSeq2Seq"], "opt_seq2seq": ("flagai.model.opt_model","OPTModel"), "opt_lm": ("flagai.model.opt_model","OPTModel"), + "vit_classification": ("flagai.model.vision.vit", "VisionTransformer") + } MODEL_DICT = { - "bert-base-en": ["flagai.model.bert_model", "BertModel", "bert"], - "roberta-base-ch": ["flagai.model.bert_model", "BertModel", "bert"], - 
"t5-base-en": ["flagai.model.t5_model", "T5Model", "t5"], - "t5-base-ch": ["flagai.model.t5_model", "T5Model", "t5"], - "glm-large-ch": ["flagai.model.glm_model", "GLMModel", "glm"], - "glm-large-en": ["flagai.model.glm_model", "GLMModel", "glm"], - "gpt2-base-ch": ["flagai.model.gpt2_model", "GPT2Model", "gpt2"], - "cpm-large-ch": ["flagai.model.gpt2_model", "GPT2Model", "cpm"], - "opt-125m-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-350m-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-1.3b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-2.7b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-6.7b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-13b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-30b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "opt-66b-en": ["flagai.model.opt_model","OPTModel", "opt"], - "glm-10b-ch": ["flagai.model.glm_model", "GLMModel", "glm"], + "bert-base-en": ["flagai.model.bert_model", "BertModel", "bert", "nlp"], + "roberta-base-ch": ["flagai.model.bert_model", "BertModel", "bert", "nlp"], + "t5-base-en": ["flagai.model.t5_model", "T5Model", "t5", "nlp"], + "t5-base-ch": ["flagai.model.t5_model", "T5Model", "t5", "nlp"], + "glm-large-ch": ["flagai.model.glm_model", "GLMModel", "glm", "nlp"], + "glm-large-en": ["flagai.model.glm_model", "GLMModel", "glm", "nlp"], + "gpt2-base-ch": ["flagai.model.gpt2_model", "GPT2Model", "gpt2", "nlp"], + "cpm-large-ch": ["flagai.model.gpt2_model", "GPT2Model", "cpm", "nlp"], + "opt-125m-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-350m-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-1.3b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-2.7b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-6.7b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-13b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-30b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "opt-66b-en": ["flagai.model.opt_model","OPTModel", "opt", "nlp"], + "glm-10b-ch": ["flagai.model.glm_model", "GLMModel", "glm", "nlp"], + + "vit-base-p16-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-base-p16-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-base-p32-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-base-p32-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p16-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p16-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p32-224":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], + "vit-large-p32-384":["flagai.model.vision.vit", "VisionTransformer", "vit", "vision"], } TOKENIZER_DICT = { @@ -106,7 +117,6 @@ def __getattr__(self, name): } - class AutoLoader: def __init__(self, @@ -153,6 +163,8 @@ def __init__(self, return brief_model_name = MODEL_DICT[model_name][2] + model_type = MODEL_DICT[model_name][3] + # The dir to save config, vocab and model. 
self.model_name = ALL_TASK.get(f"{brief_model_name}_{task_name}", None) @@ -184,38 +196,41 @@ def __init__(self, model_id = _get_model_id(model_name) print("*"*20, task_name, model_id, model_name) - if "glm" in model_name and "ch" in model_name: - vocab_file = os.path.join(download_path,'cog-pretrained.model') - if not os.path.exists(vocab_file): - vocab_file = _get_vocab_path(download_path, "cog-pretrain.model", model_id) - elif "glm" in model_name and "en" in model_name: - vocab_file = "GLM-large-en" - elif model_name == "cpm-large-ch": - # two files to load - vocab_file_1 = os.path.join(download_path, "vocab.json") - vocab_file_2 = os.path.join(download_path, "chinese_vocab.model") - if not os.path.exists(vocab_file_1): - vocab_file_1 = _get_vocab_path(download_path, "vocab.json", - model_id) - if not os.path.exists(vocab_file_2): - vocab_file_2 = _get_vocab_path(download_path, - "chinese_vocab.model", model_id) - else: - vocab_file = os.path.join(download_path, 'vocab.txt') - if not os.path.exists(vocab_file): - vocab_file = _get_vocab_path(download_path, "vocab.txt", - model_id) - tokenizer_class = TOKENIZER_DICT[model_name] - tokenizer_class = getattr(LazyImport(tokenizer_class[0]), - tokenizer_class[1]) - if model_name == "cpm-large-ch": - self.tokenizer = tokenizer_class(vocab_file_1, vocab_file_2) - elif brief_model_name == "opt": - self.tokenizer = tokenizer_class("facebook/opt-350m") - elif model_name in ["glm-large-en", "glm-large-ch"]: - self.tokenizer = tokenizer_class() - else : - self.tokenizer = tokenizer_class(vocab_file) + if model_type == "nlp": + if "glm" in model_name and "ch" in model_name: + vocab_file = os.path.join(download_path,'cog-pretrained.model') + if not os.path.exists(vocab_file): + vocab_file = _get_vocab_path(download_path, "cog-pretrain.model", model_id) + elif "glm" in model_name and "en" in model_name: + vocab_file = "GLM-large-en" + elif model_name == "cpm-large-ch": + # two files to load + vocab_file_1 = os.path.join(download_path, "vocab.json") + vocab_file_2 = os.path.join(download_path, "chinese_vocab.model") + if not os.path.exists(vocab_file_1): + vocab_file_1 = _get_vocab_path(download_path, "vocab.json", + model_id) + if not os.path.exists(vocab_file_2): + vocab_file_2 = _get_vocab_path(download_path, + "chinese_vocab.model", model_id) + else: + vocab_file = os.path.join(download_path, 'vocab.txt') + if not os.path.exists(vocab_file): + vocab_file = _get_vocab_path(download_path, "vocab.txt", + model_id) + tokenizer_class = TOKENIZER_DICT[model_name] + tokenizer_class = getattr(LazyImport(tokenizer_class[0]), + tokenizer_class[1]) + if model_name == "cpm-large-ch": + self.tokenizer = tokenizer_class(vocab_file_1, vocab_file_2) + elif brief_model_name == "opt": + self.tokenizer = tokenizer_class("facebook/opt-350m") + elif model_name in ["glm-large-en", "glm-large-ch"]: + self.tokenizer = tokenizer_class() + else : + self.tokenizer = tokenizer_class(vocab_file) + elif model_type == "vision": + self.tokenizer = None def get_task_name(self, brief_model_name): all_model_task = list(ALL_TASK.keys()) diff --git a/flagai/launch.py b/flagai/launch.py index 3dcfe22b..ecba3254 100644 --- a/flagai/launch.py +++ b/flagai/launch.py @@ -74,7 +74,8 @@ def launch_dist(launcher='distributed_deepspeed', hostfile='hostfile', nccl_info=False, training_script='train.py', - training_script_paras=None): + training_script_paras=None, + training_paras=None,): try: resource_pool = fetch_hostfile(hostfile) except: @@ -151,6 +152,9 @@ def 
launch_dist(launcher='distributed_deepspeed', ] cmd_launch.extend(torch_distributed_args) cmd_launch.append(training_script) + if training_paras: + cmd_launch.extend(training_paras) + cmd_launch.append('--not_call_launch') run_cmd = ' '.join(cmd_launch) log_dist(run_cmd) @@ -196,6 +200,9 @@ def launch_dist(launcher='distributed_deepspeed', if len(training_script_paras) > 0: cmd_launch.extend(training_script_paras) + if training_paras: + cmd_launch.extend(training_paras) + cmd_launch.append('--not_call_launch') run_cmd = ' '.join(cmd_launch) log_dist(run_cmd) @@ -226,6 +233,9 @@ def launch_dist(launcher='distributed_deepspeed', if len(training_script_paras) > 0: cmd_launch.extend(training_script_paras) + if training_paras: + cmd_launch.extend(training_paras) + run_cmd = ' '.join(cmd_launch) log_dist(run_cmd) subprocess.Popen(run_cmd, shell=True) diff --git a/flagai/model/base_model.py b/flagai/model/base_model.py index 5480b73b..c600cfb4 100644 --- a/flagai/model/base_model.py +++ b/flagai/model/base_model.py @@ -9,7 +9,6 @@ from flagai.model.file_utils import _get_model_id, _get_config_path, _get_checkpoint_path, _get_vocab_path, _get_model_files import os - # The base model for models class BaseModel(Module): @@ -59,12 +58,34 @@ def from_pretrain(cls, # downloading the files model: Union[Module, None] if model_id and model_id != "null": + model_files = eval(_get_model_files(model_name)) if not os.path.exists(os.path.join(download_path, 'vocab.txt')): - _get_vocab_path(download_path, "vocab.txt", model_id) + if "vocab.txt" in model_files: + _get_vocab_path(download_path, "vocab.txt", model_id) if not only_download_config and not os.path.exists(os.path.join(download_path, 'config.json')): - model_files = eval(_get_model_files(model_name)) - if 'pytorch_model.bin' in model_files: + if os.getenv('ENV_TYPE') == 'deepspeed+mpu': + model_parallel_size = int(os.getenv("MODEL_PARALLEL_SIZE")) + if model_parallel_size > 1: + # if gpus == nums_of_modelhub_models + # can load + # else need to download the pytorch_model.bin and to recut. + model_hub_parallel_size = 0 + for f in model_files: + if "pytorch_model_" in f: + model_hub_parallel_size += 1 + else: + model_parallel_size = 1 + + if "pytorch_model_01.bin" in model_files and model_parallel_size > 1 and model_hub_parallel_size == model_parallel_size: + # Only to download the model slices(megatron-lm). 
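+                    # Each model-parallel rank has its own checkpoint shard (e.g. pytorch_model_01.bin),
+                    # so every file whose name contains "pytorch_model_" is downloaded here, one per rank.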
+ for file_to_load in model_files: + if "pytorch_model_" in file_to_load: + _get_checkpoint_path(download_path, + file_to_load, + model_id) + + elif 'pytorch_model.bin' in model_files: checkpoint_path = _get_checkpoint_path(download_path, 'pytorch_model.bin', model_id) diff --git a/flagai/model/vision/layers/__init__.py b/flagai/model/vision/layers/__init__.py new file mode 100755 index 00000000..7e9e7b19 --- /dev/null +++ b/flagai/model/vision/layers/__init__.py @@ -0,0 +1,42 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .blur_pool import BlurPool2d +from .classifier import ClassifierHead, create_classifier +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer +from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ + EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a +from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible +from .inplace_abn import InplaceAbn +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp, ConvMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, LayerNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .padding import get_padding, get_same_padding, pad_same +from .patch_embed import PatchEmbed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvNormAct +from .space_to_depth import SpaceToDepthModule +from .split_attn import SplitAttn +from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .trace_utils import _assert, _float_to_int +from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_ diff --git a/flagai/model/vision/layers/activations.py b/flagai/model/vision/layers/activations.py new file mode 100755 index 00000000..e16b3bd3 --- /dev/null +++ b/flagai/model/vision/layers/activations.py @@ -0,0 +1,145 @@ +""" Activations + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +def swish(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def mish(x, inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + NOTE: I don't have a working inplace variant + """ + return x.mul(F.softplus(x).tanh()) + + +class Mish(nn.Module): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + def __init__(self, inplace: bool = False): + super(Mish, self).__init__() + + def forward(self, x): + return mish(x) + + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + + +def tanh(x, inplace: bool = False): + return x.tanh_() if inplace else x.tanh() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Tanh(nn.Module): + def __init__(self, inplace: bool = False): + super(Tanh, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.tanh_() if self.inplace else x.tanh() + + +def hard_swish(x, inplace: bool = False): + inner = F.relu6(x + 3.).div_(6.) + return x.mul_(inner) if inplace else x.mul(inner) + + +class HardSwish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_swish(x, self.inplace) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. 
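# Editorial aside, not part of the original patch: a minimal numeric check of the piecewise
# activations defined above. By construction hard_sigmoid(x) = relu6(x + 3) / 6, which
# saturates at 0 for x <= -3 and at 1 for x >= 3, and hard_swish(x) = x * hard_sigmoid(x).
if __name__ == "__main__":
    _x = torch.tensor([-4., 0., 4.])
    print(hard_sigmoid(_x))  # ~ [0.0, 0.5, 1.0]
    print(hard_swish(_x))    # ~ [0.0, 0.0, 4.0]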
+ + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + +def hard_mish(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + if inplace: + return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) + else: + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_mish(x, self.inplace) + + +class PReLU(nn.PReLU): + """Applies PReLU (w/ dummy inplace arg) + """ + def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/flagai/model/vision/layers/activations_jit.py b/flagai/model/vision/layers/activations_jit.py new file mode 100755 index 00000000..b4a51653 --- /dev/null +++ b/flagai/model/vision/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) / 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 
+ + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) + + +@torch.jit.script +def hard_mish_jit(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishJit, self).__init__() + + def forward(self, x): + return hard_mish_jit(x) diff --git a/flagai/model/vision/layers/activations_me.py b/flagai/model/vision/layers/activations_me.py new file mode 100755 index 00000000..9a12bb7e --- /dev/null +++ b/flagai/model/vision/layers/activations_me.py @@ -0,0 +1,218 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model, please use either +the JIT or basic versions of the activations. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Pazske + https://twitter.com/jeremyphoward/status/1188251041835315200 + """ + @staticmethod + def symbolic(g, x): + return g.op("Mul", x, g.op("Sigmoid", x)) + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.) 
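# Editorial aside, not part of the original patch: the *Me / *JitAutoFn variants above pair a
# jit-scripted forward with a hand-written backward so that only the input tensor is saved for
# the backward pass. A hand-written backward such as SwishJitAutoFn's can be verified against
# numerical gradients with torch.autograd.gradcheck (double precision required):
if __name__ == "__main__":
    _x = torch.randn(8, dtype=torch.double, requires_grad=True)
    print(torch.autograd.gradcheck(SwishJitAutoFn.apply, (_x,)))  # expected: True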
+ + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + @staticmethod + def symbolic(g, self): + input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) + hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + return g.op("Mul", self, hardtanh_) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_mish_jit_fwd(x): + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +@torch.jit.script +def hard_mish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= -2.) + m = torch.where((x >= -2.) 
& (x <= 0.), x + 1., m) + return grad_output * m + + +class HardMishJitAutoFn(torch.autograd.Function): + """ A memory efficient, jit scripted variant of Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_mish_jit_bwd(x, grad_output) + + +def hard_mish_me(x, inplace: bool = False): + return HardMishJitAutoFn.apply(x) + + +class HardMishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishMe, self).__init__() + + def forward(self, x): + return HardMishJitAutoFn.apply(x) + + + diff --git a/flagai/model/vision/layers/adaptive_avgmax_pool.py b/flagai/model/vision/layers/adaptive_avgmax_pool.py new file mode 100755 index 00000000..ebc6ada8 --- /dev/null +++ b/flagai/model/vision/layers/adaptive_avgmax_pool.py @@ -0,0 +1,118 @@ +""" PyTorch selectable adaptive pooling +Adaptive pooling with the ability to select the type of pooling from: + * 'avg' - Average pooling + * 'max' - Max pooling + * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 + * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim + +Both a functional and a nn.Module version of the pooling is provided. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class FastAdaptiveAvgPool2d(nn.Module): + def __init__(self, flatten=False): + super(FastAdaptiveAvgPool2d, self).__init__() + self.flatten = flatten + + def forward(self, x): + return x.mean((2, 3), keepdim=not self.flatten) + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='fast', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + 
self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing + self.flatten = nn.Flatten(1) if flatten else nn.Identity() + if pool_type == '': + self.pool = nn.Identity() # pass through + elif pool_type == 'fast': + assert output_size == 1 + self.pool = FastAdaptiveAvgPool2d(flatten) + self.flatten = nn.Identity() + elif pool_type == 'avg': + self.pool = nn.AdaptiveAvgPool2d(output_size) + elif pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + + def is_identity(self): + return not self.pool_type + + def forward(self, x): + x = self.pool(x) + x = self.flatten(x) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'pool_type=' + self.pool_type \ + + ', flatten=' + str(self.flatten) + ')' + diff --git a/flagai/model/vision/layers/attention_pool2d.py b/flagai/model/vision/layers/attention_pool2d.py new file mode 100755 index 00000000..a13a6881 --- /dev/null +++ b/flagai/model/vision/layers/attention_pool2d.py @@ -0,0 +1,131 @@ +""" Attention Pool 2D + +Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. + +Based on idea in CLIP by OpenAI, licensed Apache 2.0 +https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Union, Tuple + +import torch +import torch.nn as nn + +from .helpers import to_2tuple +from .pos_embed import apply_rot_embed, RotaryEmbedding +from .weight_init import trunc_normal_ + + +class RotAttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from + train varies widely and falls off dramatically. I'm not sure if there is a way around this... 
-RW + """ + def __init__( + self, + in_features: int, + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + embed_dim = embed_dim or in_features + out_features = out_features or in_features + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + assert embed_dim % num_heads == 0 + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + self.pos_embed = RotaryEmbedding(self.head_dim) + + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + x = x.reshape(B, -1, N).permute(0, 2, 1) + + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + + qc, q = q[:, :, :1], q[:, :, 1:] + sin_emb, cos_emb = self.pos_embed.get_embed((H, W)) + q = apply_rot_embed(q, sin_emb, cos_emb) + q = torch.cat([qc, q], dim=2) + + kc, k = k[:, :, :1], k[:, :, 1:] + k = apply_rot_embed(k, sin_emb, cos_emb) + k = torch.cat([kc, k], dim=2) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] + + +class AttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + It was based on impl in CLIP by OpenAI + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. 
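+
+    Example (a minimal, illustrative usage sketch; the backbone width, feature size and
+    head count below are assumptions, not values prescribed by this module)::
+
+        pool = AttentionPool2d(in_features=2048, feat_size=7, num_heads=8)
+        feats = torch.randn(2, 2048, 7, 7)    # B, C, H, W from a CNN backbone
+        pooled = pool(feats)                  # -> (2, 2048), one pooled vector per image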
+ """ + def __init__( + self, + in_features: int, + feat_size: Union[int, Tuple[int, int]], + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.feat_size = to_2tuple(feat_size) + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + + spatial_dim = self.feat_size[0] * self.feat_size[1] + self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) + trunc_normal_(self.pos_embed, std=in_features ** -0.5) + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + assert self.feat_size[0] == H + assert self.feat_size[1] == W + x = x.reshape(B, -1, N).permute(0, 2, 1) + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + x = x + self.pos_embed.unsqueeze(0).to(x.dtype) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] diff --git a/flagai/model/vision/layers/blur_pool.py b/flagai/model/vision/layers/blur_pool.py new file mode 100755 index 00000000..e73d8863 --- /dev/null +++ b/flagai/model/vision/layers/blur_pool.py @@ -0,0 +1,42 @@ +""" +BlurPool layer inspired by + - Kornia's Max_BlurPool2d + - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` + +Hacked together by Chris Ha and Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from .padding import get_padding + + +class BlurPool2d(nn.Module): + r"""Creates a module that computes blurs and downsample a given feature map. + See :cite:`zhang2019shiftinvar` for more details. + Corresponds to the Downsample class, which does blurring and subsampling + + Args: + channels = Number of input channels + filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. + stride (int): downsampling filter stride + + Returns: + torch.Tensor: the transformed tensor. 
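+
+        Example (a minimal, illustrative sketch; the channel count and input size are assumptions)::
+
+            blur = BlurPool2d(channels=64, filt_size=3, stride=2)
+            x = torch.randn(1, 64, 56, 56)
+            y = blur(x)                       # anti-aliased downsample -> (1, 64, 28, 28)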
+ """ + def __init__(self, channels, filt_size=3, stride=2) -> None: + super(BlurPool2d, self).__init__() + assert filt_size > 1 + self.channels = channels + self.filt_size = filt_size + self.stride = stride + self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 + coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) + blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) + self.register_buffer('filt', blur_filter, persistent=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.pad(x, self.padding, 'reflect') + return F.conv2d(x, self.filt, stride=self.stride, groups=self.channels) diff --git a/flagai/model/vision/layers/bottleneck_attn.py b/flagai/model/vision/layers/bottleneck_attn.py new file mode 100755 index 00000000..c3db464e --- /dev/null +++ b/flagai/model/vision/layers/bottleneck_attn.py @@ -0,0 +1,157 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(B, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. 
+ q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). + num_heads (int): parallel attention heads (default: 4) + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, + qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + + self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.pos_embed.height, '') + _assert(W == self.pos_embed.width, '') + + x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W + + # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v + # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. 
+ q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) + k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k + v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) + + if self.scale_pos_embed: + attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W + else: + attn = (q @ k) * self.scale + self.pos_embed(q) + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W + out = self.pool(out) + return out diff --git a/flagai/model/vision/layers/cbam.py b/flagai/model/vision/layers/cbam.py new file mode 100755 index 00000000..576a8306 --- /dev/null +++ b/flagai/model/vision/layers/cbam.py @@ -0,0 +1,112 @@ +""" CBAM (sort-of) Attention + +Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 + +WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on +some tasks, especially fine-grained it seems. I may end up removing this impl. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .conv_bn_act import ConvNormAct +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible + + +class ChannelAttn(nn.Module): + """ Original CBAM channel attention module, currently avg + max pool variant only. + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(ChannelAttn, self).__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) + self.act = act_layer(inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) + x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) + return x * self.gate(x_avg + x_max) + + +class LightChannelAttn(ChannelAttn): + """An experimental 'lightweight' that sums avg + max pool first + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightChannelAttn, self).__init__( + channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) + + def forward(self, x): + x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) + x_attn = self.fc2(self.act(self.fc1(x_pool))) + return x * F.sigmoid(x_attn) + + +class SpatialAttn(nn.Module): + """ Original CBAM spatial attention module + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(SpatialAttn, self).__init__() + self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class LightSpatialAttn(nn.Module): + """An experimental 'lightweight' variant that sums avg_pool and max_pool results. 
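+
+    Example (a minimal, illustrative sketch; shapes are assumptions)::
+
+        attn = LightSpatialAttn(kernel_size=7)
+        x = torch.randn(1, 256, 14, 14)
+        y = attn(x)                           # same shape, spatially re-weighted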
+ """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(LightSpatialAttn, self).__init__() + self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class CbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(CbamModule, self).__init__() + self.channel = ChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + + +class LightCbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightCbamModule, self).__init__() + self.channel = LightChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = LightSpatialAttn(spatial_kernel_size) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + diff --git a/flagai/model/vision/layers/classifier.py b/flagai/model/vision/layers/classifier.py new file mode 100755 index 00000000..3ac33387 --- /dev/null +++ b/flagai/model/vision/layers/classifier.py @@ -0,0 +1,56 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + fc = nn.Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) + self.fc = _create_fc(num_pooled_features, num_classes, 
use_conv=use_conv) + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + if pre_logits: + return x.flatten(1) + else: + x = self.fc(x) + return self.flatten(x) diff --git a/flagai/model/vision/layers/cond_conv2d.py b/flagai/model/vision/layers/cond_conv2d.py new file mode 100755 index 00000000..43654c59 --- /dev/null +++ b/flagai/model/vision/layers/cond_conv2d.py @@ -0,0 +1,123 @@ +""" PyTorch Conditionally Parameterized Convolution (CondConv) + +Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference +(https://arxiv.org/abs/1904.04971) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +from functools import partial +import numpy as np +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .helpers import to_2tuple +from .conv2d_same import conv2d_same +from .padding import get_padding_value + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = to_2tuple(padding_val) + self.dilation = to_2tuple(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + 
init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + # reshape instead of view to work with channels_last input + x = x.reshape(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out diff --git a/flagai/model/vision/layers/config.py b/flagai/model/vision/layers/config.py new file mode 100755 index 00000000..f07b9d78 --- /dev/null +++ b/flagai/model/vision/layers/config.py @@ -0,0 +1,115 @@ +""" Model / Layer Config singleton state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. 
+_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. + """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/flagai/model/vision/layers/conv2d_same.py b/flagai/model/vision/layers/conv2d_same.py new file mode 100755 index 00000000..75f0f98d --- /dev/null +++ b/flagai/model/vision/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + 
kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/flagai/model/vision/layers/conv_bn_act.py b/flagai/model/vision/layers/conv_bn_act.py new file mode 100755 index 00000000..af010573 --- /dev/null +++ b/flagai/model/vision/layers/conv_bn_act.py @@ -0,0 +1,73 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer + + +class ConvNormAct(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, drop_layer=None): + super(ConvNormAct, self).__init__() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +ConvBnAct = ConvNormAct + + +class ConvNormActAa(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, drop_layer=None): + super(ConvNormActAa, self).__init__() + use_aa = aa_layer is not None + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else nn.Identity() + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.aa(x) + return x diff --git a/flagai/model/vision/layers/create_act.py b/flagai/model/vision/layers/create_act.py new file mode 100755 index 00000000..e38f2e03 --- /dev/null +++ b/flagai/model/vision/layers/create_act.py @@ -0,0 +1,148 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 
1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. +_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
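+
+    Example (a minimal, illustrative sketch)::
+
+        act_fn = get_act_fn('hard_swish')     # native F.hardswish when available, else a local impl
+        y = act_fn(torch.randn(8))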
+ """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if not isinstance(name, str): + # callable, module, etc + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs) diff --git a/flagai/model/vision/layers/create_attn.py b/flagai/model/vision/layers/create_attn.py new file mode 100755 index 00000000..028c0f75 --- /dev/null +++ b/flagai/model/vision/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type is not None: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. + if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. 
+ elif attn_type == 'sk': + module_cls = SelectiveKernel + elif attn_type == 'splat': + module_cls = SplitAttn + + # Self-attention / attention-like modules w/ significant compute and/or params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'lambda': + return LambdaLayer + elif attn_type == 'bottleneck': + return BottleneckAttn + elif attn_type == 'halo': + return HaloAttn + elif attn_type == 'nl': + module_cls = NonLocalAttn + elif attn_type == 'bat': + module_cls = BatNonLocalAttn + + # Woops! + else: + assert False, "Invalid attn module (%s)" % attn_type + elif isinstance(attn_type, bool): + if attn_type: + module_cls = SEModule + else: + module_cls = attn_type + return module_cls + + +def create_attn(attn_type, channels, **kwargs): + module_cls = get_attn(attn_type) + if module_cls is not None: + # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels + return module_cls(channels, **kwargs) + return None diff --git a/flagai/model/vision/layers/create_conv2d.py b/flagai/model/vision/layers/create_conv2d.py new file mode 100755 index 00000000..ac9489ce --- /dev/null +++ b/flagai/model/vision/layers/create_conv2d.py @@ -0,0 +1,36 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + if 'groups' in kwargs: + groups = kwargs.pop('groups') + if groups == in_channels: + kwargs['depthwise'] = True + else: + assert groups == 1 + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 + groups = in_channels if depthwise else kwargs.pop('groups', 1) + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + return m diff --git a/flagai/model/vision/layers/create_norm_act.py b/flagai/model/vision/layers/create_norm_act.py new file mode 100755 index 00000000..cd15c2f8 --- /dev/null +++ b/flagai/model/vision/layers/create_norm_act.py @@ -0,0 +1,88 @@ +""" NormAct (Normalizaiton + Activation Layer) Factory + +Create norm + act combo modules that attempt to be backwards compatible with separate norm + act +isntances in models. Where these are used it will be possible to swap separate BN + act layers with +combined modules like IABN or EvoNorms. 
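+
+Example (a minimal, illustrative sketch; the channel count is an assumption)::
+
+    norm_act_layer = get_norm_act_layer(nn.BatchNorm2d, act_layer=nn.ReLU)
+    bn_act = norm_act_layer(64)               # BatchNormAct2d over 64 channels w/ fused ReLU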
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +from .evo_norm import * +from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d +from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d +from .inplace_abn import InplaceAbn + +_NORM_ACT_MAP = dict( + batchnorm=BatchNormAct2d, + batchnorm2d=BatchNormAct2d, + groupnorm=GroupNormAct, + layernorm=LayerNormAct, + layernorm2d=LayerNormAct2d, + evonormb0=EvoNorm2dB0, + evonormb1=EvoNorm2dB1, + evonormb2=EvoNorm2dB2, + evonorms0=EvoNorm2dS0, + evonorms0a=EvoNorm2dS0a, + evonorms1=EvoNorm2dS1, + evonorms1a=EvoNorm2dS1a, + evonorms2=EvoNorm2dS2, + evonorms2a=EvoNorm2dS2a, + frn=FilterResponseNormAct2d, + frntlu=FilterResponseNormTlu2d, + inplaceabn=InplaceAbn, + iabn=InplaceAbn, +) +_NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} +# has act_layer arg to define act type +_NORM_ACT_REQUIRES_ARG = { + BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} + + +def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): + layer = get_norm_act_layer(layer_name, act_layer=act_layer) + layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def get_norm_act_layer(norm_layer, act_layer=None): + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + layer_name = norm_layer.replace('_', '').lower().split('-')[0] + norm_act_layer = _NORM_ACT_MAP.get(layer_name, None) + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + elif type_name.startswith('layernorm2d'): + norm_act_layer = LayerNormAct2d + elif type_name.startswith('layernorm'): + norm_act_layer = LayerNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. + # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/flagai/model/vision/layers/drop.py b/flagai/model/vision/layers/drop.py new file mode 100755 index 00000000..ae065277 --- /dev/null +++ b/flagai/model/vision/layers/drop.py @@ -0,0 +1,166 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. 
+ +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def drop_block_2d( + x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, + with_noise: bool = False, inplace: bool = False, batchwise: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. + w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, + gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid + block mask at edges. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + block_mask = torch.empty_like(x).bernoulli_(gamma) + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.empty_like(x).normal_() + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. 
- block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + """ + + def __init__( + self, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, + batchwise: bool = False, + fast: bool = True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
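+
+    Example (a minimal, illustrative sketch; shapes and probability are assumptions)::
+
+        drop = DropPath(drop_prob=0.1)
+        x = torch.randn(4, 197, 768)          # e.g. the residual branch of a transformer block
+        y = drop(x)                           # identity in eval(), per-sample path drop in train()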
+ """ + def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) diff --git a/flagai/model/vision/layers/eca.py b/flagai/model/vision/layers/eca.py new file mode 100755 index 00000000..e29be6ac --- /dev/null +++ b/flagai/model/vision/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + def __init__( + self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', + rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): + super(EcaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + assert kernel_size % 2 == 1 + padding = (kernel_size - 1) // 2 + if use_mlp: + # NOTE 'mlp' mode is a timm experiment, not in paper + assert channels is not None + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) + act_layer = act_layer or nn.ReLU + self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) + self.act = create_act_layer(act_layer) + self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) + else: + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.act = None + self.conv2 = None + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv + y = self.conv(y) + if self.conv2 is not None: + y = self.act(y) + y = self.conv2(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +EfficientChannelAttn = EcaModule # alias + + +class CecaModule(nn.Module): + """Constructs a circular ECA module. + + ECA module where the conv uses circular padding rather than zero padding. + Unlike the spatial dimension, the channels do not have inherent ordering nor + locality. Although this module in essence, applies such an assumption, it is unnecessary + to limit the channels on either "edge" from being circularly adapted to each other. + This will fundamentally increase connectivity and possibly increase performance metrics + (accuracy, robustness), without significantly impacting resource metrics + (parameter size, throughput,latency, etc) + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + + def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): + super(CecaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + has_act = act_layer is not None + assert kernel_size % 2 == 1 + + # PyTorch circular padding mode is buggy as of pytorch 1.4 + # see https://github.com/pytorch/pytorch/pull/17240 + # implement manual circular padding + self.padding = (kernel_size - 1) // 2 + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) + # Manually implement circular padding, F.pad does not seemed to be bugged + y = F.pad(y, (self.padding, self.padding), mode='circular') + y = self.conv(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +CircularEfficientChannelAttn = CecaModule diff --git a/flagai/model/vision/layers/evo_norm.py b/flagai/model/vision/layers/evo_norm.py new file mode 100755 index 00000000..b643302c --- /dev/null +++ b/flagai/model/vision/layers/evo_norm.py @@ -0,0 +1,350 @@ +""" EvoNorm in PyTorch + +Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 +@inproceedings{NEURIPS2020, + author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, + pages = {13539--13550}, + publisher = {Curran Associates, Inc.}, + title = {Evolving Normalization-Activation Layers}, + url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, + volume = {33}, + year = {2020} +} + +An attempt at getting decent performing EvoNorms running in PyTorch. +While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm +in terms of memory usage and throughput on GPUs. + +I'm testing these modules on TPU w/ PyTorch XLA. Promising start but +currently working around some issues with builtin torch/tensor.var/std. Unlike +GPU, similar train speeds for EvoNormS variants and BatchNorm. 
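+
+A rough usage sketch (hedged; the `torch` import, shapes and values below are illustrative only,
+the class names are the ones defined later in this file):
+
+    import torch
+    norm = EvoNorm2dB0(num_features=64)      # intended as a BatchNorm2d + activation replacement
+    y = norm(torch.randn(2, 64, 32, 32))     # output keeps the 4D input shape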
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Sequence, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def instance_std(x, eps: float = 1e-5): + std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) + return std.expand(x.shape) + + +def instance_std_tpu(x, eps: float = 1e-5): + std = manual_var(x, dim=(2, 3)).add(eps).sqrt() + return std.expand(x.shape) +# instance_std = instance_std_tpu + + +def instance_rms(x, eps: float = 1e-5): + rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) + return rms.expand(x.shape) + + +def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): + xm = x.mean(dim=dim, keepdim=True) + if diff_sqm: + # difference of squared mean and mean squared, faster on TPU can be less stable + var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) + else: + var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) + return var + + +def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): + B, C, H, W = x.shape + x_dtype = x.dtype + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + else: + x = x.reshape(B, groups, C // groups, H, W) + std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + return std.expand(x.shape).reshape(B, C, H, W) + + +def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): + # This is a workaround for some stability / odd behaviour of .var and .std + # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results + B, C, H, W = x.shape + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + var = manual_var(x, dim=-1, diff_sqm=diff_sqm) + else: + x = x.reshape(B, groups, C // groups, H, W) + var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) + return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) +#group_std = group_std_tpu # FIXME TPU temporary + + +def group_rms(x, groups: int = 32, eps: float = 1e-5): + B, C, H, W = x.shape + _assert(C % groups == 0, '') + x_dtype = x.dtype + x = x.reshape(B, groups, C // groups, H, W) + rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) + return rms.expand(x.shape).reshape(B, C, H, W) + + +class EvoNorm2dB0(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + # var = manual_var(x, dim=(0, 2, 3)).squeeze() + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach() * self.momentum * (n / (n - 1))) + else: + var = self.running_var + left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) + v = self.v.to(x_dtype).view(v_shape) + right = x * v + instance_std(x, self.eps) + x = x / left.max(right) + return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) + + +class EvoNorm2dB1(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = (x + 1) * instance_rms(x, self.eps) + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dB2(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + 
self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = instance_rms(x, self.eps) - x + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0(nn.Module): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0a(EvoNorm2dS0): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + d = group_std(x, self.groups, self.eps) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() + x = x / d + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.pre_act_norm = False + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_std(x, self.groups, 
self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1a(EvoNorm2dS1): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2a(EvoNorm2dS2): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=nn.SiLU, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) diff --git a/flagai/model/vision/layers/filter_response_norm.py b/flagai/model/vision/layers/filter_response_norm.py new file mode 100755 index 00000000..a66a1cd4 --- /dev/null +++ b/flagai/model/vision/layers/filter_response_norm.py @@ -0,0 +1,68 @@ +""" Filter Response Norm in PyTorch + +Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def inv_instance_rms(x, eps: float = 1e-5): + rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) + return rms.expand(x.shape) + + +class FilterResponseNormTlu2d(nn.Module): + def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): + super(FilterResponseNormTlu2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.tau is not None: + nn.init.zeros_(self.tau) + + def 
forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x + + +class FilterResponseNormAct2d(nn.Module): + def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): + super(FilterResponseNormAct2d, self).__init__() + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer, inplace=inplace) + else: + self.act = nn.Identity() + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return self.act(x) diff --git a/flagai/model/vision/layers/gather_excite.py b/flagai/model/vision/layers/gather_excite.py new file mode 100755 index 00000000..2d60dc96 --- /dev/null +++ b/flagai/model/vision/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. 
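+
+Rough usage sketch (hedged; the `torch` import and shapes are illustrative only):
+
+    import torch
+    ge = GatherExcite(channels=128, extent=0)    # global extent, parameter-free gather
+    y = ge(torch.randn(2, 128, 28, 28))          # same shape as the input, channels re-weighted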
+ +NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math + +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .create_conv2d import create_conv2d +from .helpers import make_divisible +from .mlp import ConvMlp + + +class GatherExcite(nn.Module): + """ Gather-Excite Attention Module + """ + def __init__( + self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, + rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): + super(GatherExcite, self).__init__() + self.add_maxpool = add_maxpool + act_layer = get_act_layer(act_layer) + self.extent = extent + if extra_params: + self.gather = nn.Sequential() + if extent == 0: + assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' + self.gather.add_module( + 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) + else: + assert extent % 2 == 0 + num_conv = int(math.log2(extent)) + for i in range(num_conv): + self.gather.add_module( + f'conv{i + 1}', + create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) + if i != num_conv - 1: + self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) + else: + self.gather = None + if self.extent == 0: + self.gk = 0 + self.gs = 0 + else: + assert extent % 2 == 0 + self.gk = self.extent * 2 - 1 + self.gs = self.extent + + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
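+        # excite step: a ConvMlp bottleneck (channels -> rd_channels -> channels) followed by the gate non-linearity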
+ self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + size = x.shape[-2:] + if self.gather is not None: + x_ge = self.gather(x) + else: + if self.extent == 0: + # global extent + x_ge = x.mean(dim=(2, 3), keepdims=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) + else: + x_ge = F.avg_pool2d( + x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) + x_ge = self.mlp(x_ge) + if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: + x_ge = F.interpolate(x_ge, size=size) + return x * self.gate(x_ge) diff --git a/flagai/model/vision/layers/global_context.py b/flagai/model/vision/layers/global_context.py new file mode 100755 index 00000000..de7fb5c1 --- /dev/null +++ b/flagai/model/vision/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
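+        # the pooled context is fused back into x additively (mlp_add) and/or multiplicatively via the gate (mlp_scale)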
+ if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/flagai/model/vision/layers/halo_attn.py b/flagai/model/vision/layers/halo_attn.py new file mode 100755 index 00000000..f2ac64f8 --- /dev/null +++ b/flagai/model/vision/layers/halo_attn.py @@ -0,0 +1,233 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. 
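+
+Rough usage sketch (hedged; values are illustrative, H and W must be divisible by block_size):
+
+    import torch
+    attn = HaloAttn(dim=128, dim_out=128, num_heads=4, block_size=8, halo_size=3)
+    y = attn(torch.randn(2, 128, 32, 32))    # stride 1, so the output is (2, 128, 32, 32)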
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) + stride: output stride of the module, query downscaled if > 1 (default: 1). 
+ num_heads: parallel attention heads (default: 8). + dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + block_size (int): size of blocks. (default: 8) + halo_size (int): size of halo overlap. (default: 3) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool) : add bias to q, k, and v projections + avg_down (bool): use average pool downsample instead of strided query blocks + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, + qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + assert stride in (1, 2) + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + self.block_size = self.block_size_ds = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.block_stride = 1 + use_avg_pool = False + if stride > 1: + use_avg_pool = avg_down or block_size % stride != 0 + self.block_stride = 1 if use_avg_pool else stride + self.block_size_ds = self.block_size // self.block_stride + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. + self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H % self.block_size == 0, '') + _assert(W % self.block_size == 0, '') + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + + q = self.q(x) + # unfold + q = q.reshape( + -1, self.dim_head_qk, + num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not + # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. + # FIXME figure out how to switch impl between this and conv2d if XLA being used. 
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + if self.scale_pos_embed: + attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale + else: + attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) + # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view( + B, self.dim_out_v, H // self.block_stride, W // self.block_stride) + # B, dim_out, H // block_stride, W // block_stride + out = self.pool(out) + return out + + +""" Three alternatives for overlapping windows. + +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if is_xla: + # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is + # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. + WW = self.win_size ** 2 + pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) + kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) + elif self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/flagai/model/vision/layers/helpers.py b/flagai/model/vision/layers/helpers.py new file mode 100755 index 00000000..cc54ca7f --- /dev/null +++ b/flagai/model/vision/layers/helpers.py @@ -0,0 +1,31 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_v < round_limit * v: + new_v += divisor + return new_v diff --git a/flagai/model/vision/layers/inplace_abn.py b/flagai/model/vision/layers/inplace_abn.py new file mode 100755 index 00000000..a8088933 --- /dev/null +++ b/flagai/model/vision/layers/inplace_abn.py @@ -0,0 +1,87 @@ +import torch +from torch import nn as nn + +try: + from inplace_abn.functions import inplace_abn, inplace_abn_sync + has_iabn = True +except ImportError: + has_iabn = False + + def inplace_abn(x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): + raise ImportError( + "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") + + def inplace_abn_sync(**kwargs): + inplace_abn(**kwargs) + + +class InplaceAbn(nn.Module): + """Activated Batch Normalization + + This gathers a BatchNorm and an activation function in a single module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + act_layer : str or nn.Module type + Name or type of the activation functions, one of: `leaky_relu`, `elu` + act_param : float + Negative slope for the `leaky_relu` activation. + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_layer=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer is None or act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/flagai/model/vision/layers/lambda_layer.py b/flagai/model/vision/layers/lambda_layer.py new file mode 100755 index 00000000..e50b43c8 --- /dev/null +++ b/flagai/model/vision/layers/lambda_layer.py @@ -0,0 +1,133 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, 
+Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, +Year = {2021}, +} + +Status: +This impl is a WIP. Code snippets in the paper were used as reference but +good chance some details are missing/wrong. + +I've only implemented local lambda conv based pos embeddings. + +For a PyTorch impl that includes other embedding options checkout +https://github.com/lucidrains/lambda-networks + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ + + +def rel_pos_indices(size): + size = to_2tuple(size) + pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) + rel_pos = pos[:, None, :] - pos[:, :, None] + rel_pos[0] += size[0] - 1 + rel_pos[1] += size[1] - 1 + return rel_pos # 2, H * W, H * W + + +class LambdaLayer(nn.Module): + """Lambda Layer + + Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + + NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. + + The internal dimensions of the lambda module are controlled via the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query (q) and key (k) dimension are determined by + * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None + * q = num_heads * dim_head, k = dim_head + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W + stride (int): output stride of the module, avg pool used if stride == 2 + num_heads (int): parallel attention heads. + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, + qk_ratio=1.0, qkv_bias=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0, ' should be divided by num_heads' + self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.num_heads = num_heads + self.dim_v = dim_out // num_heads + + self.qkv = nn.Conv2d( + dim, + num_heads * self.dim_qk + self.dim_qk + self.dim_v, + kernel_size=1, bias=qkv_bias) + self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) + self.norm_v = nn.BatchNorm2d(self.dim_v) + + if r is not None: + # local lambda convolution for pos + self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) + self.pos_emb = None + self.rel_pos_indices = None + else: + # relative pos embedding + assert feat_size is not None + feat_size = to_2tuple(feat_size) + rel_size = [2 * s - 1 for s in feat_size] + self.conv_lambda = None + self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) + self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + if self.conv_lambda is not None: + trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) + if self.pos_emb is not None: + trunc_normal_(self.pos_emb, std=.02) + + def forward(self, x): + B, C, H, W = x.shape + M = H * W + qkv = self.qkv(x) + q, k, v = torch.split(qkv, [ + self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) + q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K + v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V + k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M + + content_lam = k @ v # B, K, V + content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V + + if self.pos_emb is None: + position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K + position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V + else: + # FIXME relative pos embedding path not fully verified + pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) + position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/flagai/model/vision/layers/linear.py b/flagai/model/vision/layers/linear.py new file mode 100755 index 00000000..38fe3380 --- /dev/null +++ b/flagai/model/vision/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. 
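+
+    Minimal sketch (hedged; values are illustrative only):
+
+        import torch
+        fc = Linear(512, 10)
+        y = fc(torch.rand(4, 512))  # the manual dtype cast in forward() only applies when scripted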
+ """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/flagai/model/vision/layers/median_pool.py b/flagai/model/vision/layers/median_pool.py new file mode 100755 index 00000000..40bd71a7 --- /dev/null +++ b/flagai/model/vision/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. + + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/flagai/model/vision/layers/mixed_conv2d.py b/flagai/model/vision/layers/mixed_conv2d.py new file mode 100755 index 00000000..fa0ce565 --- /dev/null +++ b/flagai/model/vision/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + 
create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/flagai/model/vision/layers/ml_decoder.py b/flagai/model/vision/layers/ml_decoder.py new file mode 100755 index 00000000..3f828c6d --- /dev/null +++ b/flagai/model/vision/layers/ml_decoder.py @@ -0,0 +1,156 @@ +from typing import Optional + +import torch +from torch import nn +from torch import nn, Tensor +from torch.nn.modules.transformer import _get_activation_fn + + +def add_ml_decoder_head(model): + if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50 + model.global_pool = nn.Identity() + del model.fc + num_classes = model.num_classes + num_features = model.num_features + model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) + elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet + model.global_pool = nn.Identity() + del model.classifier + num_classes = model.num_classes + num_features = model.num_features + model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) + elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head') + del model.head + num_classes = model.num_classes + num_features = model.num_features + model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features) + else: + print("Model code-writing is not aligned currently with ml-decoder") + exit(-1) + if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout + model.drop_rate = 0 + return model + + +class TransformerDecoderLayerOptimal(nn.Module): + def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu", + layer_norm_eps=1e-5) -> None: + super(TransformerDecoderLayerOptimal, self).__init__() + self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.dropout = nn.Dropout(dropout) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) + + self.activation = _get_activation_fn(activation) + + def __setstate__(self, state): + if 'activation' not in state: + state['activation'] = torch.nn.functional.relu + super(TransformerDecoderLayerOptimal, self).__setstate__(state) + + def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: + tgt = tgt + self.dropout1(tgt) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(tgt, memory, memory)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + +# @torch.jit.script +# class ExtrapClasses(object): +# def __init__(self, num_queries: int, group_size: int): +# self.num_queries = num_queries +# self.group_size = 
group_size +# +# def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap: +# torch.Tensor): +# # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size) +# h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups]) +# w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size)) +# out = (h * w).sum(dim=2) + class_embed_b +# out = out.view((h.shape[0], self.group_size * self.num_queries)) +# return out + +@torch.jit.script +class GroupFC(object): + def __init__(self, embed_len_decoder: int): + self.embed_len_decoder = embed_len_decoder + + def __call__(self, h: torch.Tensor, duplicate_pooling: torch.Tensor, out_extrap: torch.Tensor): + for i in range(self.embed_len_decoder): + h_i = h[:, i, :] + w_i = duplicate_pooling[i, :, :] + out_extrap[:, i, :] = torch.matmul(h_i, w_i) + + +class MLDecoder(nn.Module): + def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): + super(MLDecoder, self).__init__() + embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups + if embed_len_decoder > num_classes: + embed_len_decoder = num_classes + + # switching to 768 initial embeddings + decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding + self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) + + # decoder + decoder_dropout = 0.1 + num_layers_decoder = 1 + dim_feedforward = 2048 + layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, + dim_feedforward=dim_feedforward, dropout=decoder_dropout) + self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) + + # non-learnable queries + self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) + self.query_embed.requires_grad_(False) + + # group fully-connected + self.num_classes = num_classes + self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) + self.duplicate_pooling = torch.nn.Parameter( + torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) + self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) + torch.nn.init.xavier_normal_(self.duplicate_pooling) + torch.nn.init.constant_(self.duplicate_pooling_bias, 0) + self.group_fc = GroupFC(embed_len_decoder) + + def forward(self, x): + if len(x.shape) == 4: # [bs,2048, 7,7] + embedding_spatial = x.flatten(2).transpose(1, 2) + else: # [bs, 197,468] + embedding_spatial = x + embedding_spatial_786 = self.embed_standart(embedding_spatial) + embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) + + bs = embedding_spatial_786.shape[0] + query_embed = self.query_embed.weight + # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) + tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand + h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] + h = h.transpose(0, 1) + + out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) + self.group_fc(h, self.duplicate_pooling, out_extrap) + h_out = out_extrap.flatten(1)[:, :self.num_classes] + h_out += self.duplicate_pooling_bias + logits = h_out + return logits diff --git a/flagai/model/vision/layers/mlp.py b/flagai/model/vision/layers/mlp.py new file mode 100755 index 00000000..91e80a84 --- /dev/null +++ b/flagai/model/vision/layers/mlp.py @@ -0,0 +1,126 @@ +""" MLP module w/ dropout and configurable activation layer + +Hacked together by / 
Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features // 2, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
+ else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, + norm_layer=None, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.drop = nn.Dropout(drop) + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/flagai/model/vision/layers/non_local_attn.py b/flagai/model/vision/layers/non_local_attn.py new file mode 100755 index 00000000..670e8f24 --- /dev/null +++ b/flagai/model/vision/layers/non_local_attn.py @@ -0,0 +1,145 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvNormAct +from .helpers import make_divisible +from .trace_utils import _assert + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. 
+ """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + _assert(block_size == block_size1, '') + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + _assert(x.shape[-1] % self.block_size == 0, '') + _assert(x.shape[-2] % self.block_size == 0, '') + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // 
self.groups, self.block_size, self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/flagai/model/vision/layers/norm.py b/flagai/model/vision/layers/norm.py new file mode 100755 index 00000000..85297420 --- /dev/null +++ b/flagai/model/vision/layers/norm.py @@ -0,0 +1,24 @@ +""" Normalization layers and wrappers +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + + def forward(self, x): + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class LayerNorm2d(nn.LayerNorm): + """ LayerNorm for channels of '2D' spatial BCHW tensors """ + def __init__(self, num_channels): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) diff --git a/flagai/model/vision/layers/norm_act.py b/flagai/model/vision/layers/norm_act.py new file mode 100755 index 00000000..34c4fd64 --- /dev/null +++ b/flagai/model/vision/layers/norm_act.py @@ -0,0 +1,151 @@ +""" Normalization + Activation Layers +""" +from typing import Union, List + +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .trace_utils import _assert +from .create_act import get_act_layer + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. 
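+
+    forward() is a copy of nn.BatchNorm2d.forward with the dropout and activation applied on the
+    normalized output.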
+ """ + def __init__( + self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing + _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') + + # exponential_average_factor is set to self.momentum + # (when it is available) only so that it gets updated + # in ONNX graph when this node is exported to ONNX. + if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + # TODO: if statement only here to tell the jit to skip emitting this when it is None + if self.num_batches_tracked is not None: # type: ignore[has-type] + self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore[has-type] + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + r""" + Decide whether the mini-batch stats should be used for normalization rather than the buffers. + Mini-batch stats are used in training mode, and in eval mode when buffers are None. + """ + if self.training: + bn_training = True + else: + bn_training = (self.running_mean is None) and (self.running_var is None) + + r""" + Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be + passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are + used for normalization (i.e. in eval mode when buffers are not None). 
+ """ + x = F.batch_norm( + x, + # If buffers are not to be tracked, ensure that they won't be updated + self.running_mean if not self.training or self.track_running_stats else None, + self.running_var if not self.training or self.track_running_stats else None, + self.weight, + self.bias, + bn_training, + exponential_average_factor, + self.eps, + ) + x = self.drop(x) + x = self.act(x) + return x + + +def _num_groups(num_channels, num_groups, group_size): + if group_size: + assert num_channels % group_size == 0 + return num_channels // group_size + return num_groups + + +class GroupNormAct(nn.GroupNorm): + # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args + def __init__( + self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(GroupNormAct, self).__init__( + _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + x = self.drop(x) + x = self.act(x) + return x + + +class LayerNormAct(nn.LayerNorm): + def __init__( + self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + x = self.drop(x) + x = self.act(x) + return x + + +class LayerNormAct2d(nn.LayerNorm): + def __init__( + self, num_channels, eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) + x = self.drop(x) + x = self.act(x) + return x diff --git a/flagai/model/vision/layers/padding.py b/flagai/model/vision/layers/padding.py new file mode 100755 index 00000000..34afc37c --- /dev/null +++ b/flagai/model/vision/layers/padding.py @@ -0,0 +1,56 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple + +import torch.nn.functional as F + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate 
asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, k: int, s: int, d: int): + return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) + + +# Can SAME padding for given args be done statically? +def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/flagai/model/vision/layers/patch_embed.py b/flagai/model/vision/layers/patch_embed.py new file mode 100755 index 00000000..b074798b --- /dev/null +++ b/flagai/model/vision/layers/patch_embed.py @@ -0,0 +1,40 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. 
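+With flatten=True the module maps a (B, C, H, W) image to a (B, N, embed_dim) token sequence,
+where N = (H // patch_size) * (W // patch_size).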
+ +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple +from .trace_utils import _assert + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/flagai/model/vision/layers/pool2d_same.py b/flagai/model/vision/layers/pool2d_same.py new file mode 100755 index 00000000..4c2a1c44 --- /dev/null +++ b/flagai/model/vision/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? 
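+    # pad the input dynamically (TF 'SAME' style) and run the pool itself with zero padding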
+ x = pad_same(x, kernel_size, stride) + return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + +class AvgPool2dSame(nn.AvgPool2d): + """ Tensorflow like 'SAME' wrapper for 2D average pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride) + return F.avg_pool2d( + x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) + + +def max_pool2d_same( + x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + dilation: List[int] = (1, 1), ceil_mode: bool = False): + x = pad_same(x, kernel_size, stride, value=-float('inf')) + return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) + + +class MaxPool2dSame(nn.MaxPool2d): + """ Tensorflow like 'SAME' wrapper for 2D max pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) + return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) + + +def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): + stride = stride or kernel_size + padding = kwargs.pop('padding', '') + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) + if is_dynamic: + if pool_type == 'avg': + return AvgPool2dSame(kernel_size, stride=stride, **kwargs) + elif pool_type == 'max': + return MaxPool2dSame(kernel_size, stride=stride, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/flagai/model/vision/layers/pos_embed.py b/flagai/model/vision/layers/pos_embed.py new file mode 100755 index 00000000..99a122a0 --- /dev/null +++ b/flagai/model/vision/layers/pos_embed.py @@ -0,0 +1,207 @@ +import math +from typing import List, Tuple, Optional, Union + +import torch +from torch import nn as nn + + +def pixel_freq_bands( + num_bands: int, + max_freq: float = 224., + linear_bands: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +): + if linear_bands: + bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device) + else: + bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device) + return bands * torch.pi + + +def inv_freq_bands( + num_bands: int, + temperature: float = 100000., + step: int = 2, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +) -> torch.Tensor: + inv_freq = 1. 
/ (temperature ** (torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands)) + return inv_freq + + +def build_sincos2d_pos_embed( + feat_shape: List[int], + dim: int = 64, + temperature: float = 10000., + reverse_coord: bool = False, + interleave_sin_cos: bool = False, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None +) -> torch.Tensor: + """ + + Args: + feat_shape: + dim: + temperature: + reverse_coord: stack grid order W, H instead of H, W + interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos + dtype: + device: + + Returns: + + """ + assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding' + pos_dim = dim // 4 + bands = inv_freq_bands(pos_dim, temperature=temperature, step=1, dtype=dtype, device=device) + + if reverse_coord: + feat_shape = feat_shape[::-1] # stack W, H instead of H, W + grid = torch.stack( + torch.meshgrid([torch.arange(s, device=device, dtype=dtype) for s in feat_shape])).flatten(1).transpose(0, 1) + pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0) + # FIXME add support for unflattened spatial dim? + + stack_dim = 2 if interleave_sin_cos else 1 # stack sin, cos, sin, cos instead of sin sin cos cos + pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1) + return pos_emb + + +def build_fourier_pos_embed( + feat_shape: List[int], + bands: Optional[torch.Tensor] = None, + num_bands: int = 64, + max_res: int = 224, + linear_bands: bool = False, + include_grid: bool = False, + concat_out: bool = True, + in_pixels: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +) -> List[torch.Tensor]: + if bands is None: + if in_pixels: + bands = pixel_freq_bands(num_bands, float(max_res), linear_bands=linear_bands, dtype=dtype, device=device) + else: + bands = inv_freq_bands(num_bands, step=1, dtype=dtype, device=device) + else: + if device is None: + device = bands.device + if dtype is None: + dtype = bands.dtype + + if in_pixels: + grid = torch.stack(torch.meshgrid( + [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in feat_shape]), dim=-1) + else: + grid = torch.stack(torch.meshgrid( + [torch.arange(s, device=device, dtype=dtype) for s in feat_shape]), dim=-1) + grid = grid.unsqueeze(-1) + pos = grid * bands + + pos_sin, pos_cos = pos.sin(), pos.cos() + out = (grid, pos_sin, pos_cos) if include_grid else (pos_sin, pos_cos) + # FIXME torchscript doesn't like multiple return types, probably need to always cat? 
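+    # out is a tuple of (optional grid,) sin and cos tensors; concat_out fuses them along the last dim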
+ if concat_out: + out = torch.cat(out, dim=-1) + return out + + +class FourierEmbed(nn.Module): + + def __init__(self, max_res: int = 224, num_bands: int = 64, concat_grid=True, keep_spatial=False): + super().__init__() + self.max_res = max_res + self.num_bands = num_bands + self.concat_grid = concat_grid + self.keep_spatial = keep_spatial + self.register_buffer('bands', pixel_freq_bands(max_res, num_bands), persistent=False) + + def forward(self, x): + B, C = x.shape[:2] + feat_shape = x.shape[2:] + emb = build_fourier_pos_embed( + feat_shape, + self.bands, + include_grid=self.concat_grid, + dtype=x.dtype, + device=x.device) + emb = emb.transpose(-1, -2).flatten(len(feat_shape)) + batch_expand = (B,) + (-1,) * (x.ndim - 1) + + # FIXME support nD + if self.keep_spatial: + x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1) + else: + x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1) + x = x.reshape(B, feat_shape.numel(), -1) + + return x + + +def rot(x): + return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) + + +def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): + return x * cos_emb + rot(x) * sin_emb + + +def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): + if isinstance(x, torch.Tensor): + x = [x] + return [t * cos_emb + rot(t) * sin_emb for t in x] + + +def apply_rot_embed_split(x: torch.Tensor, emb): + split = emb.shape[-1] // 2 + return x * emb[:, :split] + rot(x) * emb[:, split:] + + +def build_rotary_pos_embed( + feat_shape: List[int], + bands: Optional[torch.Tensor] = None, + dim: int = 64, + max_freq: float = 224, + linear_bands: bool = False, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +): + """ + NOTE: shape arg should include spatial dim only + """ + feat_shape = torch.Size(feat_shape) + + sin_emb, cos_emb = build_fourier_pos_embed( + feat_shape, bands=bands, num_bands=dim // 4, max_res=max_freq, linear_bands=linear_bands, + concat_out=False, device=device, dtype=dtype) + N = feat_shape.numel() + sin_emb = sin_emb.reshape(N, -1).repeat_interleave(2, -1) + cos_emb = cos_emb.reshape(N, -1).repeat_interleave(2, -1) + return sin_emb, cos_emb + + +class RotaryEmbedding(nn.Module): + """ Rotary position embedding + + NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not + been well tested, and will likely change. It will be moved to its own file. 
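+
+    get_embed() builds sin/cos embeddings for the spatial dims of a channel-first tensor and
+    forward() applies them via apply_rot_embed().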
+ + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + def __init__(self, dim, max_res=224, linear_bands: bool = False): + super().__init__() + self.dim = dim + self.register_buffer('bands', pixel_freq_bands(dim // 4, max_res, linear_bands=linear_bands), persistent=False) + + def get_embed(self, shape: List[int]): + return build_rotary_pos_embed(shape, self.bands) + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + sin_emb, cos_emb = self.get_embed(x.shape[2:]) + return apply_rot_embed(x, sin_emb, cos_emb) diff --git a/flagai/model/vision/layers/selective_kernel.py b/flagai/model/vision/layers/selective_kernel.py new file mode 100755 index 00000000..3d71e3aa --- /dev/null +++ b/flagai/model/vision/layers/selective_kernel.py @@ -0,0 +1,119 @@ +""" Selective Kernel Convolution/Attention + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn + +from .conv_bn_act import ConvNormActAa +from .helpers import make_divisible +from .trace_utils import _assert + + +def _kernel_valid(k): + if isinstance(k, (list, tuple)): + for ki in k: + return _kernel_valid(ki) + assert k >= 3 and k % 2 + + +class SelectiveKernelAttn(nn.Module): + def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + """ Selective Kernel Attention Module + + Selective Kernel attention mechanism factored out into its own module. + + """ + super(SelectiveKernelAttn, self).__init__() + self.num_paths = num_paths + self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) + self.bn = norm_layer(attn_channels) + self.act = act_layer(inplace=True) + self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) + + def forward(self, x): + _assert(x.shape[1] == self.num_paths, '') + x = x.sum(1).mean((2, 3), keepdim=True) + x = self.fc_reduce(x) + x = self.bn(x) + x = self.act(x) + x = self.fc_select(x) + B, C, H, W = x.shape + x = x.view(B, self.num_paths, C // self.num_paths, H, W) + x = torch.softmax(x, dim=1) + return x + + +class SelectiveKernel(nn.Module): + + def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, + rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): + """ Selective Kernel Convolution Module + + As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. + + Largest change is the input split, which divides the input channels across each convolution path, this can + be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps + the parameter count from ballooning when the convolutions themselves don't have groups, but still provides + a noteworthy increase in performance over similar param count models without this attention layer. 
-Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + aa_layer (nn.Module): anti-aliasing module + drop_layer (nn.Module): spatial drop module in convs (drop block, etc) + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer, drop_layer=drop_layer) + self.paths = nn.ModuleList([ + ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/flagai/model/vision/layers/separable_conv.py b/flagai/model/vision/layers/separable_conv.py new file mode 100755 index 00000000..c081e02b --- /dev/null +++ b/flagai/model/vision/layers/separable_conv.py @@ -0,0 +1,76 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
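+
+Here SeparableConv2d is a depthwise conv followed by a 1x1 pointwise conv; SeparableConvNormAct
+additionally applies norm + activation after the pointwise conv.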
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer + + +class SeparableConvNormAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_layer=None): + super(SeparableConvNormAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + x = self.bn(x) + return x + + +SeparableConvBnAct = SeparableConvNormAct + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/flagai/model/vision/layers/space_to_depth.py b/flagai/model/vision/layers/space_to_depth.py new file mode 100755 index 00000000..a7e8e0b2 --- /dev/null +++ b/flagai/model/vision/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def __init__(self, no_jit=False): + super().__init__() + if not no_jit: + self.op = SpaceToDepthJit() + else: + self.op = SpaceToDepth() + + def forward(self, x): + return self.op(x) + + +class 
DepthToSpace(nn.Module): + + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x diff --git a/flagai/model/vision/layers/split_attn.py b/flagai/model/vision/layers/split_attn.py new file mode 100755 index 00000000..ac54f898 --- /dev/null +++ b/flagai/model/vision/layers/split_attn.py @@ -0,0 +1,84 @@ +""" Split Attention Conv2d (for ResNeSt Models) + +Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt + +Modified for torchscript compat, performance, and consistency with timm by Ross Wightman +""" +import torch +import torch.nn.functional as F +from torch import nn + +from .helpers import make_divisible + + +class RadixSoftmax(nn.Module): + def __init__(self, radix, cardinality): + super(RadixSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttn(nn.Module): + """Split-Attention (aka Splat) + """ + def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, + dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): + super(SplitAttn, self).__init__() + out_channels = out_channels or in_channels + self.radix = radix + mid_chs = out_channels * radix + if rd_channels is None: + attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) + else: + attn_chs = rd_channels * radix + + padding = kernel_size // 2 if padding is None else padding + self.conv = nn.Conv2d( + in_channels, mid_chs, kernel_size, stride, padding, dilation, + groups=groups * radix, bias=bias, **kwargs) + self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + self.act0 = act_layer(inplace=True) + self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) + self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() + self.act1 = act_layer(inplace=True) + self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) + self.rsoftmax = RadixSoftmax(radix, groups) + + def forward(self, x): + x = self.conv(x) + x = self.bn0(x) + x = self.drop(x) + x = self.act0(x) + + B, RC, H, W = x.shape + if self.radix > 1: + x = x.reshape((B, self.radix, RC // self.radix, H, W)) + x_gap = x.sum(dim=1) + else: + x_gap = x + x_gap = x_gap.mean((2, 3), keepdim=True) + x_gap = self.fc1(x_gap) + x_gap = self.bn1(x_gap) + x_gap = self.act1(x_gap) + x_attn = self.fc2(x_gap) + + x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) + if self.radix > 1: + out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) + else: + out = x * x_attn + return out.contiguous() diff --git a/flagai/model/vision/layers/split_batchnorm.py b/flagai/model/vision/layers/split_batchnorm.py new file mode 100755 index 00000000..830781b3 --- /dev/null +++ 
b/flagai/model/vision/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. + +This allows easily removing the auxiliary BN layers after training to efficiently +achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, +'Disentangled Learning via An Auxiliary BN' + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn + + +class SplitBatchNorm2d(torch.nn.BatchNorm2d): + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, num_splits=2): + super().__init__(num_features, eps, momentum, affine, track_running_stats) + assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' + self.num_splits = num_splits + self.aux_bn = nn.ModuleList([ + nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) + + def forward(self, input: torch.Tensor): + if self.training: # aux BN only relevant while training + split_size = input.shape[0] // self.num_splits + assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" + split_input = input.split(split_size) + x = [super().forward(split_input[0])] + for i, a in enumerate(self.aux_bn): + x.append(a(split_input[i + 1])) + return torch.cat(x, dim=0) + else: + return super().forward(input) + + +def convert_splitbn_model(module, num_splits=2): + """ + Recursively traverse module and its children to replace all instances of + ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. + Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/flagai/model/vision/layers/squeeze_excite.py b/flagai/model/vision/layers/squeeze_excite.py new file mode 100755 index 00000000..e5da29ef --- /dev/null +++ b/flagai/model/vision/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. 
+Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). +Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/flagai/model/vision/layers/std_conv.py b/flagai/model/vision/layers/std_conv.py new file mode 100755 index 00000000..d896ba5c --- /dev/null +++ b/flagai/model/vision/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross 
Wightman, 2021. +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
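+
+    The weight is standardized on the fly in forward() (zero mean / unit variance per output filter)
+    and scaled by a learnable per-channel gain times gamma / sqrt(fan-in); no running buffers are kept.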
+ """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/flagai/model/vision/layers/test_time_pool.py b/flagai/model/vision/layers/test_time_pool.py new file mode 100755 index 00000000..98c0bf53 --- /dev/null +++ b/flagai/model/vision/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return 
x.view(x.size(0), -1) + + +def apply_test_time_pool(model, config, use_test_size=True): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/flagai/model/vision/layers/trace_utils.py b/flagai/model/vision/layers/trace_utils.py new file mode 100755 index 00000000..83970729 --- /dev/null +++ b/flagai/model/vision/layers/trace_utils.py @@ -0,0 +1,13 @@ +try: + from torch import _assert +except ImportError: + def _assert(condition: bool, message: str): + assert condition, message + + +def _float_to_int(x: float) -> int: + """ + Symbolic tracing helper to substitute for inbuilt `int`. + Hint: Inbuilt `int` can't accept an argument of type `Proxy` + """ + return int(x) diff --git a/flagai/model/vision/layers/weight_init.py b/flagai/model/vision/layers/weight_init.py new file mode 100755 index 00000000..24c0fa7c --- /dev/null +++ b/flagai/model/vision/layers/weight_init.py @@ -0,0 +1,88 @@ +import torch +import math +import warnings + +from torch.nn.init import _calculate_fan_in_and_fan_out + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. 
+ Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + if mode == 'fan_in': + denom = fan_in + elif mode == 'fan_out': + denom = fan_out + elif mode == 'fan_avg': + denom = (fan_in + fan_out) / 2 + + variance = scale / denom + + if distribution == "truncated_normal": + # constant is stddev of standard normal truncated to (-2, 2) + trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) + elif distribution == "normal": + tensor.normal_(std=math.sqrt(variance)) + elif distribution == "uniform": + bound = math.sqrt(3 * variance) + tensor.uniform_(-bound, bound) + else: + raise ValueError(f"invalid distribution {distribution}") + + +def lecun_normal_(tensor): + variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') diff --git a/flagai/model/vision/vit.py b/flagai/model/vision/vit.py new file mode 100644 index 00000000..44479a1e --- /dev/null +++ b/flagai/model/vision/vit.py @@ -0,0 +1,496 @@ +""" +# Copyright © 2022 BAAI. All rights reserved. +""" + +""" +Vision Transformer (ViT) in PyTorch +A PyTorch implement of Vision Transformers as described in: +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 +The official jax code is released and available at https://github.com/google-research/vision_transformer +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert +Hacked together by / Copyright 2020, Ross Wightman +""" + +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint +from typing import Callable +from flagai.model.vision.layers.patch_embed import PatchEmbed +from flagai.model.vision.layers.mlp import Mlp +from flagai.model.vision.layers.drop import DropPath +from flagai.model.vision.layers.weight_init import trunc_normal_, lecun_normal_ +from flagai.model.base_model import BaseModel + +class VitConfig: + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + init_values=None, + class_token=True, + fc_norm=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + weight_init='', + checkpoint_activations=None): + pass + self.img_size=img_size + self.patch_size=patch_size + self.in_chans=in_chans + self.num_classes=num_classes + self.global_pool=global_pool + self.embed_dim=embed_dim + self.depth=depth + self.num_heads=num_heads + self.mlp_ratio=mlp_ratio + self.qkv_bias=qkv_bias + self.init_values=init_values + self.class_token=class_token + self.fc_norm=fc_norm + self.drop_rate=drop_rate + self.attn_drop_rate=attn_drop_rate + self.drop_path_rate=drop_path_rate + self.weight_init=weight_init + self.checkpoint_activations = checkpoint_activations + + +def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. 
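+            # tile the 3-channel kernels to cover in_chans, then rescale so the summed activation
+            # magnitude stays roughly what it was with RGB input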
+ repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class Block(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None, + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + +class VisionTransformer(BaseModel): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + """ + + def __init__( + self, config, num_classes=1000): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + global_pool (str): type of global pooling for final sequence (default: 'token') + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + init_values: (float): layer-scale init values + class_token (bool): use class token + fc_norm (Optional[bool]): pre-fc norm after pool, set if global_pool == 'avg' if None (default: None) + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + weight_init (str): weight init scheme + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + act_layer: (nn.Module): MLP activation layer + """ + super().__init__(config) + embed_layer=PatchEmbed + block_fn=Block + vit_config = VitConfig(**config) + vit_config.num_classes = num_classes + config = vit_config + + assert config.global_pool in ('', 'avg', 'token') + assert config.class_token or config.global_pool != 'token' + use_fc_norm = config.global_pool == 'avg' if config.fc_norm is None else config.fc_norm + norm_layer = partial(nn.LayerNorm, eps=1e-6) + act_layer = nn.GELU + + self.num_classes = num_classes + self.global_pool = config.global_pool + self.num_features = self.embed_dim = config.embed_dim # num_features for consistency with other models + self.num_tokens = 1 if config.class_token else 0 + self.grad_checkpointing = False + + self.patch_embed = embed_layer( + img_size=config.img_size, patch_size=config.patch_size, in_chans=config.in_chans, embed_dim=config.embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if self.num_tokens > 0 else None + self.pos_embed = nn.Parameter(torch.randn(1, num_patches + self.num_tokens, config.embed_dim) * .02) + self.pos_drop = nn.Dropout(p=config.drop_rate) + + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + block_fn( + dim=config.embed_dim, num_heads=config.num_heads, mlp_ratio=config.mlp_ratio, qkv_bias=config.qkv_bias, init_values=config.init_values, + drop=config.drop_rate, attn_drop=config.attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(config.depth)]) + self.norm = norm_layer(config.embed_dim) if not use_fc_norm else nn.Identity() + + # Classifier Head + self.fc_norm = norm_layer(config.embed_dim) if use_fc_norm else nn.Identity() + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if config.weight_init != 'skip': + self.init_weights(config.weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'moco', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in 
mode else 0. + trunc_normal_(self.pos_embed, std=.02) + if self.cls_token is not None: + nn.init.normal_(self.cls_token, std=1e-6) + named_apply(get_init_weights_vit(mode, head_bias), self) + + def _init_weights(self, m): + # this fn left here for compat with downstream users + init_weights_vit_timm(m) + + @torch.jit.ignore() + def load_weights(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'dist_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes: int, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'token') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.pos_drop(x + self.pos_embed) + # if self.grad_checkpointing and not torch.jit.is_scripting(): + # x = checkpoint_seq(self.blocks, x) + # else: + x = self.blocks(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + return x if pre_logits else self.head(x) + + def compute_loss(self, logits, labels): + loss_func = nn.CrossEntropyLoss() + return loss_func(logits, labels) + + def forward(self, images=None, labels=None, **kwargs): + + x = self.forward_features(images) + x = self.forward_head(x) + loss = None + if labels is not None: + loss = self.compute_loss(x, labels) + return_data = {"logits": x, "hidden_states": x, "loss": loss} + + return return_data + + +def init_weights_vit_timm(module: nn.Module, name: str = ''): + """ ViT weight initialization, original timm impl (for reproducibility) """ + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.): + """ ViT weight initialization, matching JAX (Flax) impl """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_moco(module: nn.Module, name: str = ''): + """ ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """ + if isinstance(module, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. 
/ float(module.weight.shape[0] // 3 + module.weight.shape[1])) + nn.init.uniform_(module.weight, -val, val) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_init_weights_vit(mode='jax', head_bias: float = 0.): + if 'jax' in mode: + return partial(init_weights_vit_jax, head_bias=head_bias) + elif 'moco' in mode: + return init_weights_vit_moco + else: + return init_weights_vit_timm + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights + # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + # 
model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + +def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + elif 'pre_logits' in k: + # NOTE representation layer removed as not used in latest 21k/1k pretrained weights + continue + out_dict[k] = v + return out_dict + + + + diff --git a/flagai/trainer.py b/flagai/trainer.py index 911fe653..ae5aeefd 100644 --- a/flagai/trainer.py +++ b/flagai/trainer.py @@ -198,22 +198,31 @@ def __init__( self.not_call_launch = True self.deepspeed_config = deepspeed_config self.model_parallel_size = model_parallel_size - if 'deepspeed' in env_type or env_type == 'pytorchDDP': + self.num_nodes = num_nodes + self.num_gpus = num_gpus + self.master_ip = master_ip + 
self.master_port = master_port + self.hostfile = hostfile + self.training_script = training_script + + training_paras = self.get_dist_args() + + if 'deepspeed' in self.env_type or self.env_type == 'pytorchDDP': # Implement for AutoLaunch # >>> python train.py # will call get_dist_args() # `--not_call_launch` is default 'False' # So, if `env_type` is `pytorch`, the `Trainer` will not call lanch_dist() # Otherwise, the lanch_dist() is called to launch 'train.py' with `--not_call_launch` - self.get_dist_args() if not self.not_call_launch: launch_dist(launcher='distributed_deepspeed' if 'deepspeed' in env_type else 'distributed_torch', - num_nodes=num_nodes, - gpus_per_node=num_gpus, - master_addr=master_ip, - master_port=master_port, - hostfile=hostfile, - training_script=training_script) + num_nodes=self.num_nodes, + gpus_per_node=self.num_gpus, + master_addr=self.master_ip, + master_port=self.master_port, + hostfile=self.hostfile, + training_script=self.training_script, + training_paras=training_paras) os._exit(1) self.initialize_distributed() @@ -239,6 +248,7 @@ def get_dist_args(self): self.master_addr = os.environ.get('MASTER_ADDR', '127.0.0.1') self.master_port = os.environ.get('MASTER_PORT', '17500') log_dist("not_call_launch: {}".format(ds_args.not_call_launch)) + return [] def set_seed(self, seed=1234): """Set random seed for reproducability.""" @@ -513,7 +523,8 @@ def train(self, lr_scheduler, single_step=True) dist.barrier() - total_lm_loss += lm_loss.data.detach().float() + if lm_loss is not None: + total_lm_loss += lm_loss.data.detach().float() # Logging. if (self.iteration + 1) % self.log_interval == 0: @@ -1032,3 +1043,173 @@ def evaluate_and_print_results( log_dist(string, [0]) log_dist('-' * length, [0]) return eval_dict + + +class BatchTrainer(Trainer): + def __init__(self): + super(BatchTrainer, self).__init__() + + def get_dist_args(self): + parser = argparse.ArgumentParser() + parser.add_argument('--local_rank', + type=int, + default=0, + help="local_rank") + parser.add_argument('--env_type', + type=str, + required=True, + help="env_type: pytorch, pytorchDDP, deepspeed, deepspeed+mpu") + parser.add_argument('--not_call_launch', + action='store_true', + help="not call launch!") + parser.add_argument('--experiment_name', + type=str, + default="test", + help="experiment_name") + parser.add_argument('--batch_size', + type=int, + default=1, + help="batch size") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="gradient_accumulation_steps") + parser.add_argument('--lr', + type=float, + default=1e-5, + help="learning rate") + parser.add_argument('--weight_decay', + type=float, + default=1e-3, + help="weight_decay") + parser.add_argument('--epochs', + type=int, + default=2, + help="epochs") + parser.add_argument('--fp16', + type=bool, + default=False, + help="fp16") + parser.add_argument('--log_interval', + type=int, + default=10, + help="log_interval") + + parser.add_argument('--eval_interval', + type=int, + default=1000, + help="eval_interval") + parser.add_argument('--load_dir', + type=str, + default=None, + help="load_dir") + parser.add_argument('--save_dir', + type=str, + default="./checkpoints", + help="save_dir") + parser.add_argument('--save_interval', + type=int, + default=1000, + help="save_interval") + parser.add_argument('--num_checkpoints', + type=int, + default=1, + help="num_checkpoints") + parser.add_argument('--pytorch_device', + type=str, + default="cpu", + help="pytorch_device") + parser.add_argument('--num_nodes', + 
type=int, + default=1, + help="num_nodes") + parser.add_argument('--num_gpus', + type=int, + default=1, + help="num_gpus") + parser.add_argument('--deepspeed_config', + type=str, + default="./deepspeed.json", + help="deepspeed_config") + parser.add_argument('--hostfile', + type=str, + default="hostfile", + help="hostfile") + parser.add_argument('--model_parallel_size', + type=int, + default=1, + help="model_parallel_size") + parser.add_argument('--training_script', + type=str, + default="train.py", + help="training_script") + parser.add_argument('--master_ip', + type=str, + default="127.0.0.1", + help="master_ip") + parser.add_argument('--master_port', + type=int, + default=17500, + help="master_ip") + + ds_args = parser.parse_args() + self.local_rank = ds_args.local_rank + self.not_call_launch = ds_args.not_call_launch + self.rank = int(os.environ.get('RANK', 0)) + self.world_size = int(os.environ.get('WORLD_SIZE', 1)) + self.master_addr = ds_args.master_ip + self.master_port = ds_args.master_port + self.env_type = ds_args.env_type + self.experiment_name = ds_args.experiment_name + self.batch_size = ds_args.batch_size + self.gradient_accumulation_steps = ds_args.gradient_accumulation_steps + self.lr = ds_args.lr + self.weight_decay = ds_args.weight_decay + self.epochs = ds_args.epochs + self.fp16 = ds_args.fp16 + self.log_interval = ds_args.log_interval + self.eval_interval = ds_args.eval_interval + self.load_dir = ds_args.load_dir + self.save_dir = ds_args.save_dir + self.save_interval = ds_args.save_interval + self.num_checkpoints = ds_args.num_checkpoints + self.pytorch_device = ds_args.pytorch_device + self.num_nodes = ds_args.num_nodes + self.num_gpus = ds_args.num_gpus + self.deepspeed_config = ds_args.deepspeed_config + self.hostfile = ds_args.hostfile + self.model_parallel_size = ds_args.model_parallel_size + self.training_script = ds_args.training_script + + log_dist("not_call_launch: {}".format(ds_args.not_call_launch)) + + return [ + "--env_type", + self.env_type, + "--experiment_name", + self.experiment_name, + "--batch_size", + str(self.batch_size), + "--gradient_accumulation_steps", + str(self.gradient_accumulation_steps), + "--lr", + str(self.lr), + "--weight_decay", + str(self.weight_decay), + "--epochs", + str(self.epochs), + "--log_interval", + str(self.log_interval), + "--eval_interval", + str(self.eval_interval), + "--load_dir", + str(self.load_dir), + "--save_dir", + str(self.save_dir), + "--save_interval", + str(self.save_interval), + "--num_checkpoints", + str(self.num_checkpoints), + "--fp16", + str(self.fp16) + ] \ No newline at end of file diff --git a/flagai/utils.py b/flagai/utils.py index cb130a81..06c30026 100644 --- a/flagai/utils.py +++ b/flagai/utils.py @@ -206,8 +206,7 @@ def save_checkpoint(iteration, sd['rng_tracker_states'] = mpu.get_cuda_rng_tracker().get_states() if env_type == 'pytorch' or (env_type != 'deepspeed+mpu' and dist.get_rank() == 0) or ( - env_type == 'deepspeed+mpu' - and mpu.get_data_parallel_group() == 0): + env_type == 'deepspeed+mpu'and mpu.get_model_parallel_src_rank() == 0): ensure_directory_exists(checkpoint_name) config_path = os.path.join(save_dir, str(iteration), 'config.json') @@ -220,6 +219,7 @@ def save_checkpoint(iteration, tracker_filename = get_checkpoint_tracker_filename(save_dir) with open(tracker_filename, 'w') as f: f.write(str(iteration) + '\t' + str(best_iteration)) + # Wait so everyone is done (necessary) if barrier and dist.is_initialized(): torch.distributed.barrier() diff --git a/flagai_wechat.png 
b/flagai_wechat.png
index 387bded2058c20f57a3c8746715d24eb96a0145f..e9dd1d3062b46ad8edb2b2912eee10b327c59c2e 100644
GIT binary patch
literal 56710
(base85-encoded binary image data omitted)
zBe3?2JW=qGd+XDV+ z0|1aCAcCp0fcO}e^t55sN?wi`OxHSKSv&)D+jvH!m)5gaTNH+?^@;E-?!SqX}MGh zkZ<;?^}V0h6nOww96iio@U^+(?zeYdV!V`B-eV1|w{4Gq(W-Npy0QBV-c*hRTrp=) zd1u&ouQu`eVcqJ`1k0(p^hN_YLAFC|@L4zu6!&(4sSR*H@@bXrXI-H(fpn2RpfP@e z9ZJK^E%9l0@wr~C5DlirjoZr0G7{Vxr65pCyf3vAXF?Lexu=DYFy>7C-DpC(T~&1@ zd%y8}`fv_utsH^1&u(H0Bffli1#RAcp4am}!*cBa{S+20P*;nt>+=+6v{;LeC2HAa zWw}KMF$^@J#CHvP-9;}n(k*k$vwl}OVdZ`eIBjC^?3IkgEKyq{X>G6x(ttO}@b9eL zq~G^pMAkild5n3%POHBloJ|DMb;g#MNNuCgZ0kD7=LXQohMBoMn*1rm#=h7-eg~xM z@LrStI6xPjQlXS_J22-3;(*_3zDU4;?#;tSyUfUIklLQV5I$A|jn% zjhIF~#dUlix>bCdM<%M3ikeE{AvthZgOx{voR86!Bcm;wkUG~Jsl@F}+r;{}rXcdY zF?oF^XmQ7^0KqJiEcVRI8^>IJM6FJWF`YaCNUS&|kzrO_#W>_s?NsAL1}A8k$XbYK zfoJ5vD|4<3bFIs=C~T8Nv>uhPQb+&GAP0;>h8gZxtiNcXhFsFr_IxaP3(_q&3jP~l z$~}}tz7|If)7m%LDvupXc2aGa+JbYJhaQa%UqUx$)5PIN(wd7UE#~Q?s^EZoQhS}OjKr`gS$=?r*cbOf0>Mh*^IefkiPa#L#^zSZ`ydZo-2P#Ox zWK2ARbrZiCndBYE8G5sALNj^axb!-Fy)&*o36>Y`O-cr>pWv#OLAF}Rku+u$#>8(I z=V&d^W)Ci<+5h)aUoR{8YU}4HMhShK_MIg{URpOPZvFb^m<0Sw6bm(A-2{?{9C9*+ zac!?7o)T_cTbvd&>bpL{J8Adsen=?K@#tJpaL+fZZjCoG6QT$fQz*%WkH>QsIEG_? z93R`M$ZHyH$xU|Y#3aL^qnKN8UFph^3Nx zD+~0&6dMbbIJd|%{)QEudex=QaHqWdLQvqDOr|l3Aay^r_Ixo@aok;Ac@=7!k$X48(U1PwfR~k5qL1C z(e73iGbtgvxEmh)B<(Lym9NDFo+2`NT8kcMIblFLkIhb-52OSIDdnvQzLdD<@p>R3 xmaY587zgBuqJNsgEbR#Yz7%PW;VwmKW=jR1|IfATjN3Z3d)5E*S(P9Ce*r2sTC4y7 literal 68218 zcmX`SbyOTpus%%C;O_43NpN=v?y@++f(LiE#TR!E?y#`9EE+7q-Q8XD<^AsO-aqD? zIaA%$HGOKPtLk|wT1`a`9fbr11_lOQL0(z|2Id3szw0C7zbB&tsfsW#R4@wC5?Wry zr(O0y2SbmImJ^rC$sOaBN15-#5n(WhcthtZZMh!c?lo`&Ee!>wxTIu^B{e)eHG(9a zBusRHDfLx2<|P7hVqzkdq_p_%V(xi1DP-?Q)k+=RCgWlQx!AG;yz!bkj4x z*fd{jV1M3KbuVCDJ(nzOQy&p+Y5TALk$*#G`xeSA2LFF@#sB2=W$iQnE1y8L2tfRQla)+C z@c)J@eswGUKb7dvSG50M^M8vnA^#f=%Gj6vKYgUYC*pt0{*OiKI2q_rChlKx|EKQ@ z(GBna6Y9ej({Ygk)y=a=fBk3ae^bn2KZ-t!Y#fvWOaPp@kI{jF0RdZL5BCop-R?9C;7;&uH@J2gV4TAa$iFz!Me?8B zsJsFYKWx9RuZQF(P>{cT=I5B4B7BMT-Q(TAJA{HiU8E3Bz2#6ok>=ZBANWoFw+)7% zUPGtA3HQOH(b%EAyVRouCEs*q&a)w(%&A9!7$=8R;LQHj(=79J0w!e=7-hR~1 zK!k-ohC!Dj$ph#xy3pO1j{VUmH{8dhzrQ*srv|)a#InWKeO`7%8GtN^nUSyqP;jel zf#~)8%9@mq-;W$i-`2U4YuB3)|DkNBw1aI^7cZC9 zbIwf>o8D2j{W&IJYMDEKI3fSpJY&N!o*^{&%aC3{kbihjpe3%*k7DYfYJhn4y#-SL zL(YK^R*HfXnvWDVRAwyuJ=UBP8l-Sd{MVdM6PSXW#T|Cl-d=SSH?PiZ$?bsws?VJ< z83*-%8hhY}n9QTN2!I=orTJBWXrT7EGWd+_Sp>pwV$}KYzJ2((h)4>lbD$*ZmGUj{ z-c(;)t#cqA5>7{jp9}T{`8LnT_^QacSx^!jz&h#9)CO7c$C+Uh^H({RiTi-HdBP^f~&Ny&RzUqAVMQB@q!_lJkiDClt-o9x_k|z zYR}B>6VY3#+Oxsd0-UpwFZLK^XmGe_eP8iMe!26K({GXjCPwCQEinnX5_Olpp15!1 zF9Z$Xq*v=xWKKXgQ7X9anK|?i>m!^masS7wrSRMl^dyb>b4durbCU1Gn*NsrZH~4y zQl202VpOqQIH#YSgQ@dh_+MpsZoHygIL;vs#=5ej=wWLrK)P7iS}{sY+7HZppmGSn zD?gT0nige29&B>$63~4L@QNZHA$ojtSposVLBba4s7QM(rM^D*{h;e$kDgp;Mb;hU4+(~9(u6P|p{R>?3XVdRI?x(Ppi!4}1_xeq3GCp|6U`=YcPz!~T+n zOKTRar@5Uoa9-L}w7NTnj<3j38|Sj+`;L7vefJ4-d92_nm#6!(X4sJqgIOh^_>$|p zEkaJ+sQZ%;^fOtty%(ZR-ga$NV;p8Cw(1e(qq1?;#Yto5&qw4P`UmZ$v|UK9K&L_1 zHo+~qW3jE{FYu_MU2L0)}`f%`O?9z^EULOctjCv)f2B(Xj(_BmYhvk4xIYj z>yb~rx6RrRIZ{7cE9HN&!a`VDdb`}ssk>!mja$nRGE0%eaa3=qSZG0esZzpf&Kv`% zPrgKB#=N|jaPu5mWJxELB1KrX;tc1YUqJ!QX=AKjm2fRstL8Zb zBv%ZV@<$`VsY`bgF1glZ2%ZYMTIuSXW3rpnth!-AC~z;p z9+;fQVX~>_^AJy$Z-Qe$mjitxS(-;Pv9_)?pUjj|Y3aw*1WX zkJH7?F|NbnM_mVN6AzZQ9P@b-ve4oTl&g#ZIn}PE%87dJ2b+uKcy4sJ>^8Ccu5*?| z*u4kFR^{YEFYQ%r64nD2DMlcoEGu+0DRk??@48MIe{VJ5zxa<`X_I!tig}xUI(JR# z8Ewz^b+=*X)9cTAyg@9-6=t)r-F{JTm)V*}e9TcXx3n}+8|+S=f&JLJriDXc)QZxQ zK8oqfCCWFtTh$)Op}VEicS|l;MfNkIVaSD_YvXRVk%mSZSPnE3W`2;5SBECqCd5V0 zY7snp5&HX8g-UQ%o3@usY{{P~oS?Y(37j7moY;>+Tkt3k&LfI6Ck67Z>c2?@E(hC8 zIKsyYp~jvYDu+_Nr{al5N@-C}OR2 z)+di?j}e|23O<7>>@BJags2-}3C;*(G!^Nic*esIxYvLmUZ)Zlsy#w)Lq$Dg4`$&5wQ(m0 
zS6iafX+}GQ{5-S~wsKrK+9^MY&DVXE4bRZmB5Fx@SJj2^2t#1|*+FQoRht?~CWfqY ztNF_!{?C6TEXhzBzlD!Hj%?1v=){0T4WWEG0n;d3&q?+S7IfEcL9@7(`hP@5nh>t? zgS~htjM8D6E-=1gd=9WY_48=RP23wavjavPDjPKI{PTr=9!qh6DfP%rT%tU=@1jg! zJb8+Z8f+BNCvtET-X(_T;{6qJTO8qD0*~hdal}Mh^A_qaIT%cfYK_fIDJ6%swGpz^ zXI)sQGnbQrDd!wFQjPjlWGd`d*&pat zP)|g$;p1CfvBB-2)B4+2%JH!|HI;T~J!@^=3@L|>x6xJ5!Wf-%2W`7*Gh_@2F;cwl zdHR8s*QqSl^b!pq$GkFP^Kqb%C`c?2fo-uv@9KL^N7b{8EMvWs5@LwYUm>s z)$PjqhA+b63DM+Y-xp#ugwjyIgkH~8^cr|L->XXyj77M!uFx6PV>Az{&Il~UZ7w3~ zew^3CImsCN#sg-!PTQw$Vv8f(`fzTS_{!s-CMu=)ZJ1mhXnp8boGd}3hB&vNqC?hO z{6_S2orGX-ui`Z&q@i)8B^U1YZB&rccra|@m3r&C+BArq7%5Z4$fWA z*a>6kVH5N{Ud%>ABtYFGt|d|butO51UB$#-hVe~zg>^?sh%o^ZgttO6{gCJu<)z3} zrY-kf7w++(6R`KbW_4-lH9v|tpi1@@=Cbp!%ypFs%JbkS7iCap+1HC=R?<;ALHR-Y zfJVQ&#Dh>0Ui1SR^uZvGdm-Hy8cnPmPZ3!EGR#3uc{;)|p(pISqpS<0zzq4Ujpn7q z1LSsno{}E}oyY-IdNb%Ee1FDPA*6+}>RY+rGNunuco*xLE7- zBt~B}pp^GnZt zQIZ|l(?ye3)%17c^IYiC79&x6y)Mwp6j8`%t?dIaCgwpWo7afzYv(#ZZzuj?P)jI-@pv9EKY zO(@N)2arjZhy@L_5l?InNQ4IwktnblGJ?Z<0(jfF^G`>&S{qxtt86ckX6^43uZ=aA z*2CaijQc~SEbpX2*D`)+uLedryhI@aMRMf0E)4MAzwCjgKmI^X$L5avh~?B&FtgY& zdG_BSqL&6!0d*A^^~A(I!wLPKy8gy9ekn!y9)-CPD~8`MO)T>PC(~Ey`G;nS&N-n@ zi`K$8h|;_yJojE(K)NGRJu#5H%!n9OX9ps_^IAFLn>WlBtgI-0yTLRiQZ+@pSCOF% zK(k9(2#D&|lAY%*(!#Ev&P{}(`H#lYg(kr{@DRPVx`T}$v>t%HmP`J#xbTQbsZ4Xe z7?n8EY^y4@ov5^yh)Z{Dt?eduJ9jPa)PrNx>*!`m>rN)FWP>42umpggI z-0RW#M7A@7aXpW0tM{?3YBSGwYoGQnve9HQE{nu-4=*YY?5|JLk&D$@wY4CqVLWo+ zu5Hg{Y4Nph)r~duYZ}}Lio_n=h}NyzxvM{K4-|7XijDDX{g$2JBSY_nZ6u0VOgUnO zJ}*9BUA!H1kgS=HS>*x$GVloT+rU3-OxF(~$ksSgS06>LC3z?k_tlX60xDD#(pXht z4|Is3=&@qujuP$7|8C)zTbR7r?1(Y(DpO~!7`3an>c*uQK(aj^8^$QdHXB)3jU_Kx*ZuZ zq}xDNI7ObKVI)s+!aXPFYq_Lekw8SP?+RY!!ioRJ*X3oqGv_t{Cbk~umZ{q~G~p8m zZBcC6@9V^vmRgYI0KOat#V(~yQPG~_^V)VXxqNIB|AJJQ_8SSrE&X;0Qz`H_Rg7we zvG3VdPOb2vw~^nlqR`*}+NH|}n%Z=XV{}fd#&?NX>VbrbBTI{Ra7GldB8$bTkRM<9 zwN!M3+?thFLEdg^n)z#1d!Y6fLMue!LYD%B_meKk z{_O?`UTi>_+E_sO#+cqCuP%&r9*;gb6T{5xiRgNybQt!Q0a#eaihM5%uV31EvK4Mh zV$zypZ@9!ElKU5NpynlGR<;G1VvCSfz4uthnR(|tA7Uw)@NlNLP6K=rBFhIV%}mvQ zp4^pUJUh2e3lW{d$A5(n`%_dm7rx#+@G%|l??^Js=a!m?FjilX;$Ch0NpPxA1rlAB z!##^eu)%z2K6eET+!#qBi_J)v;}_tkC_TCycPo_^XmS6N?9hW_-NP zkbTTxlI`z1eO>D?KoWLjI~z*B_t@QYYv1BgM2oWk`d#s{k16{f` zD*iWJBpEl>)bV(Xezi#Tc6}dhPU8}PS><@}-#V4AY~zNd!fxKmTnz0z* zNyOS-1sdFWiJDpI@sB!0F&%%;0SyPy1~_CZ(iyC!OZXyo zRW@DmQCM`|%MPpbn1KwjLJH7<3k8=CxhAP|zDw$sg06Su{>mL)WPecE8LjqyBg0UcxKJve{l>pGV+xxHVSP~j#oTN(Ra6cso3;E7X@``E5U=zFs5PkybTm)*hnzgZ8=1sf-5VbKXv`-2 z;7R?9mbL2cBQcVJh%>zi4DSQ;z@XHsZnvS%Dg|F-xYnsO+k7)2r&8B%t@So@P1h>) z!Y==<8T{nv@DV_7zmC4}tvC4-1?#$o=&g+m@#>;2g8n)See9~5Y~?V=-tT6f944~; zWY+Uw2=UkkybEw#dd$!w&#PVGmk`uo1zWFg*71tAbE@Jj21;?PRH|0t8UON;Pb-MN z=1{*-?!ako_uT$4gF);VprMXk2VOs;2yMVaB0jIK5JDf4?u?H-O2IhL(8$32-7-9> zf0kvW!K7>?$nSCpT=9~v%xxh`w4(pfB)<}dpo%bEdg3Z+B%x5N0iGYDfRkjFvl9zDlMCl z*Ib|&otos~>dvfNi;n^y&*Y1$B~z^tPZH^NpKbo*G|~dY_!qhCS-&9D@G5+D`N-vO{>~I`hq@Z zA=yN7|-Dw$@`M2Fvum>@ZpC;6#4ZY3m&(Z$Khh7^) zBVB~Wy_k=myk&%ndIpf3HoN;S5HM|D^;kIHz z&Zz{QBdG_}OfdCbk*El>i6d!@pa)g+6~O1ivxD1v-xGd#WFu^??LcbbfN$4sK9VYuer;?3q7u>d+K8a0h#7}$aHnRY-b=xsznizAnpYd6CLqlK|HX-pY> z+BPFj2YfLutONEv)tnaI_2LHj)EUlL=fewj)idUq-Zi>f{Z2I4)YDTFZvW~+e zux|Xt!f$7FJuqyv48eJgZ*L`L0tf-66F;!btC0?N?1vC(*zbZp$kly?{0_J2$K1|l z^VY~CJeY(kQI3AKt58bgt*R3ejJuN9ep zlud7=rn{15mt|^7mJYUzk0Ha~U~!c^>{+>deDd>B;G6AEpx?JD6A zVv6^QvZ5($3;B}}PS5=CEpcKe)@|iR<6YN)+y>Rk`p>!8kli>T>=)3!NWa~!r5-M{`R&HA(|Fm*<`p}h3 zbK+R>wkf>*z?iV9JjzO{)36V#0PXiAQ*jgSZm#5M1ncCQQQ>szx+4f5Bj^4Dz!t5L zFS`jR!Y(OM>b%HCJP>O*NPyuN9QSOK$LHWEVlTCgc#f9&6vcs&puu<_W{_9>whj<( zd-$RIwv=8G^GvjCI>~}fm{-vY?dLhMl3CS|@|@2nVl==6K(uV+rQiq5S!0#OX>aF8 
zmC9~OI#Bg@HUzn40f4^AS6y4pGiq7A$-UZfYa_6DjaDa-bI669^+rEWkeT)Sk> zTicDk8!eZzrQ4FP?5sNeSD|@H4hr&M(g3yK{Xk0lG4}UL0iT%YOM|rrWgrelKue!?;2C}_)`VGLK7enQogvQ zb+l;l?CvkrYUW4?i4v_sDyH6OMQURE^`r9$qv0(C{m5^sv;%Oq(xX&M<+&$VTvPrh z4WKG3zcDxU`gVqc1*;!CXr#~KG#Xwq`I&mxBJd$sRfV7_@p{#w6wJcj+R}rzt|^NH zH!g2S-G#3+wv} zTZl59PuZuoH*rhLw5)ws=D4m=Q%jH)$diFWNL*pXF#@69T+TFMQU$Jt-C|D&8*8gf z6%TX9JcA6d{B$r>`pM&a^Nm+{ZWYM*ywA8$<{GB+;v0$WeL0%6gMTVO!#@4QFfvno zMisfz%fT8cDHRxRF8;i{wIy0)(%8_yCX?93N2ljEh0(X1{>NzJ=trp#DiuR6ug5tH z@pAb!6z)K44M9Y-r9Ke|*O1L|`F$rhD_+^2+Rh}hT`%XMeZ zKX_Tv1yRIw@vw>7-Qaj! zyUOsU7l&_CR(@{hn%M|%)}_A`9L0a_dOrD+mqi+}$EB8|vnDmN(&=;1E)=ZNXRJf* zRIcb?8J_&VTmax{$pmKU#=-|97JgQ|2NtwY5B%#`UkA^WzS$e=2sX_q{snAsdL>VO zEw<>DJh?lZUeLGW-{Y|9T1VAtC%4C3EFRs2fhgqI6{Tn5e<0`R7KDkD0>xk3en^}& z9_NRY#IGrWi*DJNjj{CAu0&tncm(#)*qluEYg|}~&I7Y;z7rq&`wFr@LZi7OR|7p- zn19EtLAAI{JykDo?kg|05?tvIDl)lxS>|}hoXyps6v27%_W~)Swz4Rd9ZD~q5+p>y z*Li91pH7eMeP<|0UqD0=FRWI7!AbN{6gRQ_qf|~%cG!_o*rG%JX01U1FVXu7;)VEh z-LZP#!@d(Vn5AaH3;w!Z!LRc1KjrAsk@9y~e%0o>6q`8G-`3$l;QADgT^!rPLhC!$ z-a$HwkzYPbY)xlS;edp}^Y3@{<_TJJ>)18mzQnJ$JPPYfSJW_uu|P*5x$ERACP>tM z!m!W`?+_w4cDuKrxdbWv6qyivz9DiY>o@gbrLZQfkkx};XGO&HkI3aCaB%x_qwC4` zs5>&k#d5!tk6K(JlRV)k*fjK|6kbqPLl<@;OR8Bf3gAm`6szIZ-D3p2s$YHbv{T!f zMD{v7sJ4Z0pi;rd4Mk(^*H2{l>c>n{P!F3kzqtcHl`G3f9q*~x+j%uLT#mm6pXbIh z6r`xnrkvvb%HLI&8cAKXwkx`_(dlGSU%My%I!?)#tEX5bJC5-kTYB&u0~MI#N}=d; zn2rvAaGd^DLQAUJwejxW!f^6>vA3!%VL8NU`P$iD*WcDQu!$4%^iiY+5NXm~KNauK zOg1<+Y1xBx;w~;d-(6O+WHEwOAokPmVSDK297oBvHacCA8+(+~10lGW$k*l}h;;0? zFFSVhh75K;h7?C-gMTFe*BO)kj7h;d^^Dyu0ZQ(~Mihnjf+HhKJ@Q*>xo(?t4S|7{ z1{T8TA;r(bTnP90-7^S>AaB5A8JSq)>ABLAj60d4RQU5I`xY({ZoOK*GWp6M()W&* zbdy}dKdG`Vf%C43N{SqWElziL5j|oVNL(5k1~u0SF62m7@Uf_=IyFY}z?UUbCl0GO zOFWnE01xCV?=zMy-XVfCLT8bt+UIc&dS=CKhL%ECj|fS_iwpD2;nABbBNa_6CIrD| z(i?A8z18F8rSNcWXAyOvapa77;dI4tuW6JlosA&Mog57_5TNKsanzu8oI%_M9$chF z8sjAx+yi)IvAgZRUIi4}HKP=HZ!UQdZkZ!nO`e;x^dy6`I=YBLLsg0_8Sg$(E`;w3 zYZ$gDq8z&wt2j?G;|)A*KhEId`t+Ei?0Uj4SeZgs;Q^rqz8A6ae*jjd%B6_vvWwwE zoWk$s1K-&vM8Ey}b!Nv$_Zs6hYI`AeJX0tzsr=D7#1;1a1*rT@AgT|=zD4U1osyRu zTzG_rUkFVP2>kvw@{n)4+mc-8`K94^j5KzsS;uv?k?wWR!9jL+1h|$bT;_g{b9Bha zl1T?4Ts=q4ij83W^Dy62C!$_$dhWCAmwFxhJCXY=9`Wkuu8v%UuNFeAx^ytsKNX7m!=8P67aSt{!2*r)bEhxtI+ zJ$m8i*xBxe2`I&0qgHVxZRChHm}b)$ zLbGjn(t-`e`}FaPH?M2&il;DwxZDh1`?oSn_c55wzw9KB|EYk=JxN`9A zG9*N-ugnPHhpJJw4XH7DiMc`Mcnw)GYoU+1pv=<7>Pls5t8=X)E^Hy4=%I~h_mM-2s(Sb4Do!7=&$M)`W?X{G&1%+SM(jrJr zzp=hv!94aOd>S$@PF&m5(y*uHD&1Bnygbq+mY$5sjN!iJcBF8-a+X#>38_b zMY612UnMB&^BwW;gyw;3=5U4VPwn#?@BfHcUsHmZ`2F}2kDV>(^lXK`m`NU2deCRa z-ERwfU{@dWrpaWgpLejhpdQJAI!;phO-Z$%e0p=$j+R}3x$dT>G`S1tSa$+!Gny4W zMd_3)CXNf1T&sVLW4hG1tBNOjJ+R!kN*+K1Z^acQ)|95Gd6h;pwdLDtWm=|R1l!bH zaQ7xy>mI1!s0ruTE86ZB2Le&q+djC;Ytrd1x8`-<)W08Jms#v%6c%Eg_M83mv-8lc zfVLPuNapkWah&*E-i&>%e{gEx;E*`>xZ$?U*mLtbcWsf)nJ>aw%7g>$Fl<~Ek5?i< zDZ2LiGjh<-I4=wm*=@$X?=K!P7-1Wt3feGaG9MynJ;E6K+_hJgp3t+`aa*nz_bgS` z>Dhk&N+h6Ui&RT(E>6bu$Q)7RONinjxrXdIMShI@alYx%sX&-}&DJ+784vLfxI=1rXAGW4G+l6)T~kUU6X}iyq6B;~Q0U z{Z!;r&L={B?JW(xi7M(@PwRG8b%Hd0j2`-LqNQaISQkmFqOSg1_bv|rQpCqLLWlJm^__xEg;_k^FguN`a$ zP*09BN_osmxNpSUtT9@E7IOlM_VFbV$5_gg;UU<=T78VwV9K zCfW63n7M&$m_X>u1=4${3wMO@Ux=k{U{BIsQOH>EVtjRpZQrIba#GrOaADB?Yb7}O zkI5^rpoOP7EQW?*9SMBq=jrw#%V4rI3uP4*O|C?ah0}yBwXnCG@DomIh}uZdeZ(%q zH1*A1VevYzSK<+(-EuMZ6>)YGF*B{hIcTTV^NWRC#mlLYSuGf@-A}47mC275}WIB(jBZv_5So=ijqHb z%S=My1$#mDtgt&{&E`YQLlm_+{ndsZ^j`yF?CCAU`4@PJ7Paf`Om+9r+JAqqd-<32 z(OU@F=^6_sUTIxn35Htn1Zg@w9^MZ{KzJh9e(ifwrmn~)B&1jK5(Mb_0f|l1Uaprn z=_~p#{)Ul%JC{4x`|zqo?lk8onaN(hfoQb2 z#iP0Qj6bF0*+R<2&X3%yR%-+|(Q!;g9x>02Y(u>#J)%SMIG^atRk0@f2BpS_BkKFHwJ80l;_t 
zvqGs?5R!r=EV{-Igo%S`{Z|FkGBMElkn2o(RbW1}My|aISUVr+3)*S7aK=6qeAMtS zwYxd+bVxeEDfbkaKG)zaC5u2`f5y2~>0!=1qwI4;n1T_hb`ILwYDx#%|_189(c~GxOl|s92XkHlCmDB zMz)M}t-ufec(Pw+Z;l)ba5@P>vfW(8v)*kETu3>gcz^lRm?fbJYKJ#f8c$P%n0fvr zFwl7~wUj=r7>FTy_GYmq>#q0js5dN$Q-)qAkk5)$3kM2@m4L!kY_;k(Do}cm0}JKN zbVRLoIdwP41{?q-MZ;8#@mZVM>zEH`RU49nZcN%#XjeX4bh|rPwCg(qZvU*R>8W%r zH{VOVNw%s8+Za(kENQ|2ZLU(WCui;2+}$QBT2H&aTV_t%njh+BC{f>6AEnF%px4|| zlki%kQ0shCjy)O5XVwfWqEYv;W{}8X*t)7w7dLPbIxn0y1zwo7TeY#YdFr$JZ2N0o z;v6S6h%hTC1I%Xz(5>}Yjm43v zrsj?HUTHDS@d2h@iRApw0I}fZ9Q_)Oc6sz8tfz+MXcuW0n0*57LuEKtN;W zWr4Ui_x+{z_hCoUD&(%jO2_b|M6sUQU*wzJ1qO^3%!%1uavw_SARA~lr(Z_xk z2`U2oNGc|zIReFD!s)&=Vsm=%*Z+dF1D;lZ(!X%6n{USP^wB^=(1~=T>}BQCc3`#9 z7Z*%dN{gg*i#mvcKryB~@8tJ}7GHbCd8`<{Ox^GBt5FRRycRW7UB@<>BZtx60KarC z#WM3xu%5C;HIY=iYhmSE$@+T^v}&UFrW{D~b%zpz>@s5gR?X*J&;*(|9YdqwgWWn> zFj3!elfojrXM5t&_9#d7>nKl<0Po0Cfz~;Zy^F~cEcG^_--h;Dld4FDWo<*%_+o#@ zO9WArEu+y7zqI3RH z2;o9CcQ!mku2WIYYVR_L)I8nHcU%;9+Mjf6$jw&)nl!?|CR#6%Zqdg@`~Lxj7lH82 zg6UPmfY1A1PIw{|6;Bqc9DMjb-*;H7AZwrBDvTj29W`qnhb}C9{|De=(3&FxvMTW< zww;Wm`>t;7$XxsGqP9kyP1nJ_tAH;vHbJ+>u+{=pT&sX3^t-K#R}Pa>rj2Y<2ihcGno33pVU4Z_fD9Hw*wF*dkT|`dUg3LY zVvXzo(Y6`NDO_&U;Ve+rrlpPdzIJq7J;YXEd6LGLVJ`Htx&osD&El-ef(@&ZcHmdd zUMcq)7aQDSXJT^V2JMC9{~yG*#`z$a7)Za`?P<(UA*x4bEXSb!0dLtV_+S zEbfB~4bs=S%DGu<|Fg-D2z2g1)Mj30rEi|H-VgHd@+XVBBB;)phnnu>TpUEmswGS? zU8SrbGzL&t@m(K6X0WXS;>Q9flT-OVZ_GbZ@bbs!VMKjv$zF>Dj8hvAPq*%!qk6Gu z%&HsiJ2N`8T+ISB8H?Lhb;HZD*>+#^W951yUzQ4UZBRl&_9zoLt_&}-1 z)9?DI*tXX7S`Z?ec>u_<08LOCX|)WtBL#P0?7BxLRkDn^OZyKUV&Up(!`My?%FuE& zw^&0`=lB+~NN9=Nenr|9XGH0)RC(ZxeaueAPdk{gpR4h6cssfsIJ%vG}ni)DU9d_#wo-;&j#UgQCJq8rGhx z6K;9?w2RHCut}pL8zQOM?O}J*j6TROWk8FdqBloCWm?>#}`xG)Pqy1dfBks z*5QuIKu$mHRX_cu(=p+&t;6k*2WOKu56;g)SJDN+BP4R& zmNn;-voJ=n)#ih@3X`(bWEy`c>w_fHY?yMn4=J0#D%#|Vqaq;%5&^V z0dfQWQmZFXM?4yhBe&=ZzKqa4#W7d*a-D9HHtBM}|iVI13sg1>nNXIGZOO#EqBmGjkjwPcdsm?2H< zju@)q*?37j@{7}leV`A<3^)`Ln$F?xbya7Gv#{$p_m2xacJJPyb!w0048_iUoCcQ8 zIy^xTRA-w9u+%AjN*8v7LXL{W*BMs|DtOu#?rGXke|SH_YY#|$?Lq-=ob>L;{?~Ej zTn@h~rPf$~Oy1~~KW{bH<%adJNVRB3oe#_+1JH}Pks`v=@C^RT@ivVI*-tmK=0h9* zvGXsq+diflv%!={-zpde5qQ1Tpmlf8#edh=aQl5n>! zK$?Xu@NnSgQ4M)gZF=T6?Ksu&7%jh1i%;-ssD0G76X%Kr_7LFn_jcqPf&cRN@2ap% z{@Izf_?QxfIDsFu4j;plV^}KhZ+>sa;@(9b3KKP;sUygdyrNvBwL-y|9cuz6eqgcK`92hyMvMH zc0&JV*bMSld98279$-M-DUP6`i`NOe3f14r=S|_cJX62QFamKw6cAD3lzhKAnS~6+ z+2WVuK0I2sd^oO`Ka2s+2G*rwNCauSp3nLN;|A&c0F>UbTW*v4tang4gyn&Ix?B9l18DD5-u8**jAW*cy< z^hQd8iHOy8uB(?H{>7#KV0-^4bi#TagGQ(38sD;~+04}MXFEBud9V!jvjHN*t#}EZ zFJFV({x+NTWHVWLgIo3y9A!NbNr2I7t?mo2`KU z?B#n?yiz{mguRk@EYBkMNk}zk3BR~VVl|dZPsjPl;)kbiQn>5A-YK5MSg3|5h*CKx ztc?8=^Vl;mZ$j_35yj4}JyLj%5Kf&*^O~$Z?dRl@gykQP&En?}g(QE15l1<2`to{r zQ?N{L(zEHI7~A4w%`%GD_se8x?od?J^eQc|SM!I!jqXSuR?+ z_@~ff-JleXdTT%LU9d|R;?HQ?G?SPScC6qHpT`cox{l1%L`2-pt@@syH7)}vQJL!9 zfy8l>_})e zBKI_Y{cB@wr30?Z}CzQ)AklO~9Nen(}uu zx8q9GGnUGpA-Li(A8r(8t^_wwsxzK5b~vK@>Nx}J zf))@V^hXP&Vf*48yi$#kSn_yAz9Zg4zXOA(9zfXktC$__GX}KAVD0P5vizofC@+Gvd{ZCC2sZQ1IyQnH@;be-dw(^BLkZuZ^H+PkW zw$#vEGWYE?Vpj2?$_G#CG5%{+As9a2Y8AROJWL@4=7t4=3%p? 
zN^PM%{72Zq>E8K<#Kn$>g~UoG5gZkp^~|8(qbKXpg9A%f6j4 zhO%OpwRI95kR4#6aomqmEhUHZhekcM!52Gcp!`?C6H1&U_t4Nzi{kgm8ZrTayj=@+ zj=D78LN_gM718hSc{(TEJsmnVg@*&bW(0M8w1@acH`-I#;^hhpl_^d(!!sBKLp+Vo z^9T}i@$4qa#&-R-Q_a`=Q!amBp+Un-wMC#I+8rJ=7~((bWT=L3wyA#T4l?heXprWs zYAm}dwet-`6IonU`^K*rYz}bTYb@^t+(Lk9XsPMZ?2oACgfB#G$oO(MH`D*9%|whb zO#d2PN@clWDye7sX579OGDTlT4lDi9oq?Roqq!{cYv>{($N=mDJ!{L%gJ?$9keUH~ zJHI(bZ1D|IjyHN^3cgCIH#wjVP{H#ZAUVkO#LkcnkbgU1Cat0lkT6-cVy>#TDo=JQ z1JGA zUN^|~$@roNtY-w?^M&H{?^Ay8qR3eXw2(<-Vt3%Q+{omM+xKLEK1aHPOG}j=1YvCz0D5q$lzD}n-ST$ znZr?{sL8O_Keth${c5`|(i1dX1d^V`kVW+ouuFW%x~66N$`{%$2xt*!S7*6W=UQWu zrM85vQr^X?R1tZAC`SI=4ox<|%<6mChomG|&kW{~tW;1pat12tcE2J@)s&rLZyZf# zl(q2nkAFURaJ7eLfZv&f!AmNSvSug{I2qgYt6z7zP&Trfy}#HPAu zqZ(ibC+WHF)aRx;WgD#1S23HR2#5djV09I`TN1SV|7bd^fVQ@1UE{Psk>XISl;U2X zxD|JIC@#SzxH|-QcXtU8++AAS-5r8kFX!%a-q%C&u*O{Dmv2V=G1kninZ z<0bhyC?+aNledYlI}Hun;?UNWOaq%!%G@Q=8B~63_dY`aEE&QhSHYs_gD^MZ>FE8e@NcC4xq8QQgb79tOW1mIG zqSyNWkqFw`KU*vtwzto$WyJ-m&i8P}{en9)VPoUOoG5(W21cfD^vh@i04fz3PTpc_ zy3W!Pw!-9g+W9RQKOMUjDq7Wlxyd(2C#l0+JNl9*X^HIHmFW}Lx$Wd`HDA$nRq zaSd$&Rju#kkC?c%l2x`J(H9Kq3^pGf+_;?8&>p_Fq5u-0q$s4wA^FlRcad;|;WvAV zZuvrK0$(XcC@=BT6i#xROJX+f*b8+L-A2X1x7viDk`2Hk?|KQrpX$=TLgkIiZc&yh z!zeiP1xztqCC7A^-WjM3TtWjAI&zwCSCjlA*Ww)F{tfS~u+g6&%+h~*>DbxD+R9v} zi}N4p+FqoKV`+XQI>(o6U9)R65M%2yXDdgynQ?b$x8SbQDyjcaUeRgj)y~O!Y&U%e zIPAdYe4vfd3>UMq^Zqk@N=K`(_|-UFgKJ}NN>~%4CN$nwtv$IE6$q!J_EQFi zC9&2j9Mw6+bl+b?;!ZqPRa%)g#ftv;v&J|Kp112?Vhpg;QcvpdF;_zPL0$Ou)RYoG zo;{aKsj3Eh`+I`lwQ$+vsUW@$rUYay&vJl0=iE{K@+%F&IGhA|iw&QXQ_jvemLZnG z5zCz_H?1J~yGA`Ax{;AVtG3}FfmdP zrwI50drP2Ls%*@C;DAPmgX9Qm5-O8;+ozBgx-$c5x1_p>TikwxHs3Fa9FK}T#qY9& zH5UR<<*DT2LrhguTf&Ce-9$Vrof;v35mGt{Xb1)Y?0DZD>gr0yd% zXJxk9u@n4Yg2U{~2>^wB=-}ql8y`^5MyNZPd*Wa{ZdV7~&oJ`_H09xa}q4>KrRBWG1Tpi9Emjw?YbcmFPD01ljf4i**f{@;nfk zQ_@PAe!MN?a^Ag}{ot%_wRgX@rMe9iKCgrCDW@xlt)XJ*yEZ@hn+M7oTeSrv?2`|Z8|1s+E> z=DJz672!|FM4}{9pWV>ld(dUA5k*53X6dypFjF8bvP#~$EvBWbpHjL3#wI#r%*WgP zF6tEZU{5&2YQ4{Ap_&$A3wdrbzj5Aw^{i*Z$Q?Vnal&-wbNYWs6nX~#C8}GLja7H6 z|Ir5C3#osLH3;5r#Okn6_d*2q`vcrbGkk>2Lc zgrrh+g^+!9Z0Ho10hbjWvUy^_=(g{uq}8%}L|H&kYzXe?4v4R6#WeATQwn+GZSh_^ z`kyBgsxqqDTx^%qnd#gId^+9w9dlBHYO3tIkNqrc)4U6@#TbtRAETRRy-1&hO7WV! zULm@sq-0@RrxmX0=e@H_*Dp~d8^tRQf|n;1w&41`SSV*sMIH2EGqlsGtn48o%olA9 z!&H*Cx^FcEbJfIrZcoX{mwZ0ZkdpxY=P#txXJTHAY`wFBs{#~!*>pU78xJ#F>0rhw9C-9~? 
z+p1$Y_r7|-d^!1|X}FrC^M2*>4BgLJWj?8T)J!QqGWZ!RaFvr9|MD>r9Q%4PhKb~_ zl()l&tEjic+xfZA%t?>O_w*`B@Yac|Cs+Kt4V%}NB!2Ky?L7LkgNioCw>T&ke@o>e zuT`>+JwSYwPX`IL4yz&7YA6-=dMAqPFr$C(Lj`68i&0%~d0$>&Xy)4T)V0=u`qf37 zE=-aWUs!x#Wlz`*5qewyCbv=y{FG+JNa9di@1n1*_b|9vk4}g-55N^l(P#5Ncf>dq zYJ64{M~@7eZ~Ws>u;N9pA&Z+VLR*&6wR}TbklgDiu8$+p3V|_Rh4lg5DXkc$9V3Yi zOXoZ{s!rt@5n+1&^xg$BQfU)L;{U2YhybV{sYw*m9eEfd^+8Npl^T2F~^5!qBx z(b3G-9@V8(X_(EG1s{0T55+c@-^}e~_#kt6iZ}H9vEr0&5T7_hi%#c}C1nGmd7Cyn zAveuEz%b`YyR<9A(3a1wdASwPDAQ(@XFm9c>b~0VcoTn0a-a=QJs>wF$kLSI_X`k1(!E*VUxNTR3~O;4^2jz#6J{ zl6!4L3R_hUeYfZK0E*xGL{{x+cDbY8pWtS9=9V{Q@AGSYNm}L@X6bS>D|}qm@&jRB zLO39U(qCIG3dyaC?zmMe@ZZBL8uE=#J$6VI3!vaJhh2830>XjdpQ>hROy%?V^SD3{ zsYaCQPlF?o*azyzxNZb=Q6*N z{q7aEUC7tD3xav=ciCLn+ep)4XZGy+Sj`qpaj)G;cK)4>B^mUX>d@RYBHg1jK@Uny z)!P&K!>2=E4~}+SgEmGU@9W(}xs}hK`L035&`L+$^%1AkU(?dm7dEr`JnxF>b%B14 zbGxj=U;>jEr5ew}*!sjN!4xT*G!?ze;WecqX+4_{4?3!h@(xBeQ&Y(!#A0DI9=Jan zX^zrm3Z^mI5DDA(7Fw+gP89=6K=1`yx6Q6WJ92DRoCGB$)OE|w17DVx)>NZ#usyc+ ztn!&*Bp|94NFBf06w7zAazg3p$%qv6<1nJ8+&#v#c1$cM*R2ra%iVXAz(xd&80WK2 zEU`3`rEq57f(A^v8kd#rH}p*P=UA-F4+7mGUzsX^-hzh-=OEkRCKpx~)5+zt41Q@IE{mfNEiW~hQTrZyr^;=s+IeQ1N!8Ug`w;E9qk|MPzb7=RNTw7-XMB+J zv&ITi3mqOG8#W$Z4AJNV)e+KUpRkNyEisWyAoa;qC#yleJL6LxE{4l+W4}0b7C8^w z^hcp>cN+RXX(mh8hm2X$U^1BYABXx*n*YFLIVW|_cy8dzUPFDaf#XexVlX@7fQhAW za6!C-q+yrZ0?SmG&6seHoRnlpe87bJIuCpv9%7n(#SM%>5uNigeLAe8DDO#N-dcP< zLz6d6^_8M=R5uulEnWH7lGN5*1jutMLNRf#c-KzlJc72=2iO5Keft7|dMpboFO~}t zHZRQ4FtMMVYA!8EOcBU}CsG#_Gh?8K>_AL$swvwRw%FG1ua~5C*l|~53v+P7rpUdF zW*()>OMJ6VxhIp77onZ-(A)pX?WIy(#!Ke9hhD{x+mY`7-+Ey!`HntYj3qy?{hw3y z{h4x^T*9MIj`6Zzrmb8mMU?M1=wCTsLR_j0kOy3-q&YGOr#-OEAAu6yRm`1f=lV-!7%it?PjH4ZR=nchGVhjHKgtot zYtnyPT=T5>Y8u2nWvLN6y1wz8Y6nFxs0dSsi}&a`NuPacm%t6JYZ2a9h4q4KUoclj zD_OI-4smg!4YMoZvFz8NS**U%neZ*`%`hfzk9J)}N(2IKd7m&EU2lq5EU!Nk$3YPMaKb>xe=X$}-b)b*Bp5B6`RpP#FT;r!l)_IreVGN_rUVidh zpGBAPEYjhLNp>n+lSL(&0UHy?Uk&;F6QDQGzoF^HT}vQ|itdGccEvA=Nv;qg0ep>Gn=R`(6dT4GJO(K=p$BmR{V-Tp=?eQTaHWDJB&Pldv&Pah^lnL){;hEapMP?+=(;~Wgt6w42gn3W1EncWEuBixiZen!~vJI zt*!f3@z3a*^O@Aa;hI~jO+JW_(%|x^#(;jWD-~ekJEN*Oxg}}&!VUu^Qpn|@MkmQt z0fyir@2SF-VHo~i0`nu%rQG4Z%Wg-RlNc)#B}&oYA0|tcg_U5RpbrP@P=(FWXMz=? zXvwDjVojSASVrV(YW-u1c}(;!I67mRHR$ z9e{4b&S$QKW2PD+W+-T`p(D)mAhO_f2T_&0DpWlxc0NKX&FM${e}He9j!hOFMXwGp!$rx#IY zt&GIsnqFTxueB;mxB5GZF4`)Oc9EIbu7eV+$Pb$r1p&1L)#U6bVuMkrTj>9JN9wA=KfZ61eAxvp zaBonM1rnCLb$3HE-x)rSSB1N6IMQRQ2p#*yYDkw;o32WBs+9PrgJ)r1mdww(YC@^q zl&s0g2Z;w(H2QA&BtXs2UmxljD`Y*liY?3zd-OpxE z`FPDq33C!YX0Cipq$Ql^=m@?v*M(HOjXk8+9LKXy4)3;xmy*#RfZ7kV$lZl;u>}7? 
zrkvL-I-k(fU6iBl&z{~*)C<#4;o3G%JQuw9z(zDOKg>+KuEN5muQ@~OesyOsq)+gP zbdyzdqd%GyFn;K4vepv7f^?`^c;4em%e3S*tvJ!)_CkA(=}hX%hQm1PU>Tfr{VZ!*D->5TzL?P&EOIswFKUy@TaW`rf4(Ou=c>GA`= ziyf^9hdIl|9@M!Qty`fGzeK{{2TOotbe)Zu>}U$NCImm7;8;>hbFP-A>mBV)Ms<>9 z8}93l-`}-ox-N(onmQ$<#_W-izTJobTI(XPEe)|p7wbuoaSDJvqcF~X8!(p+r9|rk zis}a+D9WrzD?EoDVtd2%={b5NoE-|JW?%HH8}zwEntvbutp3}fdIKM{@)P5>%Oyp% z#m@TKvaH}anP(K`_INo_T8+3>Bej0FQh7zOl96kx+DX?x@o5Wol)ltn=GBHUutPt|X>>&^-6sh(Q!}y{&vJ zi65=cNzQ*ncHLX-#N6qEC7vpTfQ?BaiNsC`_dYA$4hs23YemfH0j^bN{Ep3;pMF~T z1}YE^79YN~7z$6$`+T^WFTn~8i1}P`s=Sg)Mt~trq!C+PvNqT1Xr}9!#?PZK|8GzY zx6J^$=vH-gxn7BSv(niE<7vo>1|+H+EekvoH>rCuLS+~_C?`m9x?aFNZ#_T_eh*T= zvc^ah$sW$Wd47cm1)=sS)9;qO+Lm{mW2s?4Gf&VjQ#(bmni=u0EkVYS&v z@-MneyODT*)gRuM(s)(T?XY{k(e>r4RRd9mkYn`XKGE)4|N8aU&@<@!-ywPS?-^^9 zU>W8Fm{wL=+%s)lt(o6k_R1PIwo-E2dKhdqmo3!+C0;8hp~*;JBbJz>p>teu_ktSCE5q*ZE;`kdMPt zaTq746Ev23QfbxT5=bFh*M-l}D39tmHrbOzZ9DOKRYlP{OUiaz*VT}`3ZU~z`FX|1 zbimsX1$=wCS^M~w#Xs+3)waC6@qKD6f^C(huD5)HxGKoTdjW!5IIc73^!8PKx>`s^ zXf_Qlm#PeXu)iA4_;14N+6~8;#@SS2loQQ5x!BGE2i0d3>s0-(&m(%Am8l%_;R)j0 z;j)TMC3(T|ZB?VZ{%5nt?HuYq98!q^(Y+^$jTa~Z`E9ph^|gGShHibS85NgS?*9ID zu>pJ9%lxyvTSD?f4Q50YDtC?-vLpuy)KhrW)EAzDUv4I>HpXAZIOQ9sKwyHVZ2Mlme*@1(7iP6sqTDlyEZx|4h`8SeeZxG$-I@=q1Q11*OM zfCb5m#b5+FKKkXEF^?i#lnBn2=pZvvqi;e>k`uXMj>U-+k@${J{;S zXkB=gZnQz;dH3Gegt)orDs+#w9{MG(akUWp(zV2AUdoOVNq{Zyf3W5SO*osl;swp$ z(Aoc8np&lQa}v~?WaS+2|My7v=S2@R<5Rbw_x739War2SnblU0*O_r$_~T$N1O+cuN-p^BTqR6i5$4 z$W8?JP9)xIoM?GMaF^n(X+8`bQ>3xBCZ#L6DcnXjGad{)Z=xIn9)R zI{r_1g|6ao^i(Q6FL=M8n%pO?<25xSSU78pA5186aP2;KM?cq3xXY&11k6MT#_}j^ z4{#3MzlX_wDqArQSG5HD*u^#Mx3TkrsJa_WfpGl1-Ec+# zvyZE{xnXy|gcadlPy8xAJj4eokjq2G4Bx#3SFtSXuL{}X=S}9G97r}7@e*VA2sa-b z3xZ4;fQa2AUI_Cl<@7t17|cb-v$y;n|2P%D3rp1uC>_hMN-{~N{kp*pyF4-T60+-u zYcIapOn&Cr%d|cXnEzV8Jl|7i--*_&L2>gd3}|wf__#X9iCls!d(Xp8g?&;D)#;)p z`FUKV`BkFl#{RVp#@AKP&J_)*5xWR4AcgqG%JT{uLgA;>*Qt`!?sNaA8FZS}{d3oN zr3kHPm^V7}`gbE@}+10C+k>-r}AzCqsVWcE2; zY4If4Gbu0j%DL~v56qdnHVavu)5nFe42^^7cO|PwqB%sZ4pCKV z<3C<&mrHAox??EY?X2D?4MI9NldQJ1ewC-kz|9unQ9Q$vEN*Atf%p^d^E-nMC;Ms9 znxs$1wW=HRho^jLnuN%6p?;=tM~uzvVisoszgQRb7S;?UN3KB-&k3QjllYk^h!UH$ zFjg;9Ro0H=1Dp`Bq)3y>Hr0l>lNG8;Y(j@lUCX=LW+ON~DGT7CDVXJQuyY;#T`FsV zem&1@bkHPCjB>@dmq9b2-L}5~n;7LTiNK@E&<8(ySG-W9Jai+L(f)2;9D0)Y8Ho1> z9Ev}lNrSpi#PR#92m9F-6V#%U-W`E56UG;H8lJyu2E?&UWFQjztexOB?`6tGCte)z zeq2MZiwy=N zsM*ho(c9)J^LIIz#JH@h@nbA6 z6@7~uy=~QGpff#9f3x=?(e|L@eL9KD#A)4*VpHl&B)+RIpgRLlZ-XCB;!A04(m~G( zN2?%ly8Vw!8uL{^-E75{+Dz4}|D2A_f8I+Eu60W=on2Ay|FQt*f%c+@>!p--hcbu!{iC-3wSi-ndo4hWLzfGXsEiMMV>^FEd#YuR*6c(2Y_Emn&) z=sBWl_}0aQ$;W4iyavFK3I*MedLA4A|65_)Ca7xP$JfG(!1zqge_qj~%=^IR^^M$z zoqQLx-ii8L-)dEaFG4Xt4j+I9)teNfynIg1Vu%l?;NtMum4%ecN~^#Xp>WInbZos` z3p`T}u|%|V2+dF5?(*&!!NqDiE9}59%A(JC6|wgaAB2NF_dhu*EhXB7q91Hyul;L2 zmRs$UKi%UM@^K_NU1jRx7lP?Mntn34!72z`CT{ zX$R@$hNR@(LeIP>mkC2MAOw;8wtvez)Jj1Slv5gx>K>TeSl5m1MDi!&E&Ca z##vXG$Z_L;d+ zRs+-Y?s8{)9DL)D6&GC;RYzbXhF%wy8c+PL3%|6ZSrLjxEe!8z7VN{^T=3`0VWflP zXNj2hRFS7;32Ts@DlW8Vd;-z)hSCnHDjIs-meeRA^sTBEJR$l#x0J@LLB=D4A`zxp zr%iH1_@)|piOzQXV&HOKWn+Fg2`n0@7foqKnOw-U4}a49I?3^`;SUaL>V?fL`1ut0 zlLbD4IM$OWK5D&w@dSJm5b`2`HS(YoxW56;PD~j!RP-xML^fK9+G1j+$yWGh_z>Y7 zL)q0_)t2me^CK`90gakIfy62h4PerfqwZgJiQI3Z9 z_EEi_VovrKL@Vv+OP-)M~Nau z%=1;Us3KF+(f%W}Z1X>vYnqt-$7zZ4G^)Qk`DhM_BmS}{4Sz@JqwZ5NPn$!LamKBJ z0VhK?T;xs6{NckhMlWt33NuOhqd_clJ^D#!lqL--QD`uzNk*q`$;y1RF_VHJASxL%pcGbGphHO~s6 zofdZy6&F{O%ES#%LtR=J*>FX*&8$OY-xOGz5B^I>qS41gk6IMtK`WEN?nqefm7;N( z=WG-ji!o{LvnfE(Rsz*_=^$pVZ1XQYAZ6}U->s5#j$4(Y1y;OCr->bntp^wO%87Dx z%!Mk)AGN9HN(Y*v3xw38tRgJhs?@0Hmqoxhe4siLi$SSxS2)wt=iYrR=#HJZM28nO 
zoQf-%m|>ERY9d|bf_&(+M?e%`D~UnwwsXI;-I*L!gBAOrRkM9U^0)Oxt*`?)kRCzZ zx8^Z-4hylwKiCbp>x8Q0NJYzY8HX78{`-UH<$n$|*7UJUx(AT;h3Sc?T4E1R8eEs} z7V8S&o7WxR5&s^2-fpx(tU30M=FNK}uW)B4_ z=nv|esW~7c%c;wtjRahoifQ*XPzeqWS{e92)iA1Kuki*fyqjM6e0z9^(n^CX>1-gz zK%6%;On9^E#fvI7W|;N~Q_(5iLKGMTCPf|yctP${WgH2|5V zDsW;;M~GAYI^Z}RxpO{Zt2Od&xHh^6-VRO^8o+NA3!ERn9* zGX9=x!KYCr`g?7EB=R5X?R-u#?^1-zf#26(?jIjlBQ#z6-L_6e-17+tMXOraB^4t> z6wA5vrYgrRV~DAb?T{tZ1fB+zX20Q&7y!=f$k<7PK6mAk?JL7$f<>n_odvB#yWI$U z`$deJJFDw*V-|cuvEtFBT8R7Ak7(GSZyRpZo(6V&-)L~X+89GMjkz`I;8vx$-cflF zt~T@-RSNQ5#C46l8%GD6Z1s^lBJue#DFp#e7m(w~2!G-Zwv6}EWg}qIDJ3ha=xz&; zy__eudiXS`P80~|stXj`8nohX6p8-%!Qvmg#^+h=5eeXtr7Jk1rc49RFqJC91GL7} zXY#c?R?k7IuEicvkw7kQmLMH@gVOhmNl~#nwSwd)$K+a(2F}Tm)@s8ta*gcw^Uwq) zyxxKNTvz?qiey*d#bNf(;7E1>#WCXJmq6iWr^!_j_X6t?-Es^<5(r!1abZ@m-YiY0 zF%4%WkewB2z-rsO_vcV`VX-=edR?=5nLASBHv|-m*P8;&XTfi&Dgxdv zuF(FfHbQ==N`h;hAew&H3{6p;{wSYlfaBAeF-J*_%dF&hx6`eu>w`xX5DP;w_w|E{ zq~r9g?&nvs_bbL3O$TQ=$^?~{Svx;`kx-Y3$|dpKxY4!r8W;0C5u5wtPW1eDnfHfj@5ufp1+xYU6TD^YaWSuHT@6qQ3 zN%Zk%R7g8XD^YNY=EO!Np8jWrst`p*Xh2g&yJd72i&zB%)?%vKP1zn#wlsixzq_4& z52fl9Mr=jnI>F;pW1Ro^f|_Eh0&&zgKjLooXHD2&7(qt5(ou)2WT}xvEPL)(QKsS? z%yS7XOg)S9Z-b*RR}Cf6V%)13o7pg4Cxt4}9e1ff{i66|+hAbU@1|NB^@=CQ!spxxFQuEG>4dXz2PD9*m?MBZ{XlsnxK z8)FmcJywY2QiNj@g`FoP!Y$FmBLX`A1PoPN_Z*ox#Sz_q%nQZ5}Whdm#ibjWbZp1z;|6jHPNX$9sX!edzj zh;2VGTm8AFNyEx%tR#lF9IU*CLs84K=8Hp*0~uuwW90qgSK2m-xM*odl_3ZsAh zQ4L2^0x3plc-#Erf{lSbw!Hbym`x8^%URBYI^`hJ~lZo$?^%8$);M!e7uGsG10p9${V117EhR9Bo~#upuq4%;GTBgel@lk^zRUmjy>y|(Z>wdS^EreflH1}otYPqX?y zRtk%8SbtYq4FC9h+Aqe&q10k1NmxHEM;N-D4(>B(#L+aB=+_ia4c1I@6!OjHP|noN z1uH6+o)XnD5ng6HUf46DQ?vxKC^09P@?)7W{-6TqeIDghoKmd#^`~Rcen;~IJipI& zkdtA1lRJWPy9ViPuX)A-=}a>eZRvQGJrYGOv~*}FvrEPXXY($r;MLfFK;AV&!mQH; z-gK4-`d8++xW|-2-~V~?p!jZJig7^*3)laq-x&N;gx_s?H^TF2zQO;7=&2t`rpvKIu1$t2QK6ZNs7Dj2?ziw!? z0Wrj@LgI;s^vCFlAHPv@XevMy{XPA^64MLog2VuyP?@7r-)_6B_Rzk1pCiVoO4;So zk=-m4G-MVZAN5B$AMFJ3HK$Wyem=~e5s|2#&T&~oMy^Vtvyo;akXAj!yr+8p`){A5 z^jPvwj)0TZd3?L#{SoeItE*;kPk=T*8RQO8ypKH{=`&gTlkTG~k|gfGV{;K9;X&t{ zhmlQ6%7ReqT+4$G!CnpX+@s4irpB5aopn=11$XbjTvV;>9Gy>@@t>L-a~b!H3S>`? 
z8HVZ>)Y{D6q4CuwgFKrck14>Ha*YY}U=IxCLeWE7f;mZej2#zj!}S!eDDDTVE|H+;>It0oYU&ro&dEdxEBAXdEq_ktr$KuL zLXgRi6IMBh{#w%#)tvu0!`U=yD9+dS&wY|y<^xf-MW|&D3dCcBsCeCdn zIAf)jXw}bPx5Wae9_rqtLXC-^T)=(7AzbWQGdr#wJ&ZjcEiYFV4g=930SsG93y|34&IQO%f zkSX0Qv^_#M*i~_4kp*ch0_C?E%!6}jV;Q{*a%{TJ!e|$I9FCRkcyQn24xRD9&%&U` zBU5cys9Mr7L1>C#k#zA$Sj(>tohyK5hJA+~bE!0qt1f-G5x+iKux`jLJ7pk)aq;31 z&(yp&66H2t4=HWVYkAKGhh(DQD+HEic185ykhiACdNgV7?J8l2Bqw%KDV7+%U}Npm z?BUc*AhTFa=sD6-7O?tvR4fKKK_hl%?`e^*{WQRW_x&$6F~TO5&N)r=nthm8uM~B@;wN2_?zrYW0crH43=hMd%v}y^!BY>+?8uG9#fH+o~+?@ z=Ta}9uw7EuLWc#=ev)+thcSvlGL<7}7ay!X&+T-`nb=HV>K$o5rTo4?>@WoRu;HAL z4-FvD3tBw@=#v+=hbDLt70x&!+=Y zGy}Y0)=5|W+-yQ%VbR9|%|~4=1yNSKCnH-1b%k}*;Bg1z&UrEkeMw`@$*E*o&%t#8 zw2Jq(ir<0WvKl1%$?!$grL%^vkL#}0itF9(khZoJ{9n666%GI?)K^KHn92T6Jp<7C zFa2&$ZuZW;`R_a`mdWJ~p)le6u{+6-@e&GAZ(h*n9QsZuDiTm}rpVhlCTRyZ&j=2K z54Wv@g$5rlqof|3$m1`WO)A=)=^EN)X>XI;Bz-^R-YVKA)uJb)tws9=tfiB2g4D(=I~wOQFh!J> zl|~kw^TqY3ex*7Ky*|q=eA;44PEh$%Y_6AhG8_#1zH$JN- z@l=9qt|hBPiLUX^D~Cr5nl|nGbC)I#DO(Vty&yOm?5(qI(?`Cai(5CGFE|5(wPUU9(P_87g8{Dl7}g1?k205BFM8N9G~gI&bHetSZI z3KW4~G8XZlx%-2)#l>Dy99{D$H9bjTPY_pd`l5`R$OEO4;+}4@JxHJZg#eq;<66B+xLU z!EqhNuLN*=e(43GJBqoe5+-fWN32*Pj?xi}sizQ-@Ny^p^Lo1ZRDyt|hYY)}_w$$g zHqtmwJ9btTx$+nG^>!y^S~bE6F9Ei_Wj2=npV~*tgCKw{l;!j9V08B!-Fjx7G9^$D zh?Esg$#=H9MNUQo#o)K_0758`6a6$Wx_?zuo}Eo7zvbx4fgdfEayZgq`<|EqqFB9h zA7N*aQdcbuh%ro!^~f%%Egy=>PT#mOn|QDA&=id|kWUbmr=5YCLx%Txmm{YGTO`J| z&Pkpx9-;TGGbFDau++4Ut z+#Fh)PHJ3Mq3#(;ehJT8)wPHpTxR&~0jNPPdTkjbduttt)VS59eFLbXMKXkTZDzc- z-{%mO$X6hhbjGb&cCW*$>g9G0-ni#wUWeS6RAGu+Cu|GvdDyCr?MJx( z^$s|`{W*>I;3yWHGWste)qpVgKW?uv6-RT%|Bgc@|9yhNim%OrZ);>{#qq2}2nQ!b zs}UHNDoHILMlawTy-r_YjwUET;%3{r90qVZQ`!8tn#fmipf4JX=2YxAgqmY#gVA=j z&DjV+vFb!tOiZc^y?ArrnE?D>>{3{dZ@rTCK16ArA_5YlCoU>y?%64=>RfL?Bjhwo zp)ePVo}{8eb8zsZ{|TGIehJZb{ujG4hpP0YVG+c*d`=0S3G3UGz=iU@xFO`}4ABMU zDpY|naKBn)2XiJ4EZ!ubjH{h6E}3uKD-uGbouW%P{4oG8 zd3>w?_{^=d#|a@q$zwXqx8_2l6ptGXZ6_E~E7S7*Fwt^nIgZp)@3%J=JLnE{?|9fA zx!rC)+eneG!8ga_+9ew%qeIt;YaC!N$oY3%;u%@vUf=9y6;8+-&t7*;F@n?!->*Ip zmuo|EGf5>HTpImVtgmqeL4k~f0MNTL^fumXc0;Qp^8ovP=b)2;J`=t^RETYpf9x*K zpQ5s@{PaZ6E~-=@HAjQ^o2K4q#=6V7%|-C7Dn%wbsEpSxLkVfq}om?s9-U_;%Vu9+H^# zR=O$%YP=q;u7VF1A`cE0P>VmR-_&&wDvS$x?d^07aCJbMANFe&4PJbn?~yyLe6RfN zerkF}zUs$MH*xQdc%vT4EveAD)FW!FVGyF?p|Nnb-|q=jC`N{t*zrN9D{t^|nO3x5 z3o-SsGFY8ZFlGK#&WJQ@IeiJIHC~@_L5ICYfYGHkrV(=JqJJ zRx?ZPEP8OKO@b8lD&2w`lnPNO%DTXrCDF~^Au2VE1!sulCK42TX;?>DUm0cz-#!N* zHWH7bhGguU@-$xGpFtogy?!v!+CGFN7{(iJ9tB0O*G@E>R9-U4unUVEfmbtK?vRH( zgLciA;f*%UmvQ`Rye=#8?Xjkc)LzQV{`KS<-=?kyQG<@Ihk^~amWOrUUHhvoJHAW3 z;p#8aVsi>-OiVThnHY&w_lI%NDE|*!#*l5T;>OaHuX4e@$0+V9`%_>CyRzK4#HHy6 zl&zd)($gJjtCICnz`PmebR-qOte`XhR}sECfQ*|WgM7jfhgNX9Zg*WX5t^qTifm?3&%i$ zGV9}6z|2Mi*T!Y*vM`awR8!TyPMLUnbo0d4!;yybdo=~yHaA%m+ye_wcP=F8f-AgX z$(`R`!=CMGyw<_+df7~1!#pwlB`JQH(fH;Ooc%zTbzqA}pHL3p+ed`8o?>nqNH#Rq z()XJb?nd|+Shq1Fx$7CR9k!YK-tj>XHtbqBWBE#;1ELxH+U#3;u}rk;4c$?16R#pPa2;Q1riPt`fP{dwaF{2Uqc&tD?a?%5kTQt;&#InvGz z-n8+>Dqs3T$iJ+OR_&Mm8Uv>FN2-d6>xvXx*-l*`T_dEHam*LGG&6wl+WWYT{FX)? 
zDQH+-UV?F09KX^?P1>mDQDnj2Z6l2mOl|O{NUoEn%7=O?o)wpb5X)h~@(bdfA>tgU zvHD|+Gs09GP|L5&{ulXNL#0Fj!;`r9@V1(<4z+~PyvTq6Fm;xu8CpR*1FVcNbe>`h zM;k<6ANrA8(j&izs1KLHC53pR9N@eFMqiP&PDnjS`2U5$IUuL|2kTW^~o4N=u zOmlM0*E>Nk3zXLnca*mOmj(E{4R1yhO`I&#b;+v{+4Tg)k5qWse%x;80zw|5?#m+o z7#!(zI{IFjckZ}d)x2&M3@m5@=lagMny6182Ma22>>R@odHKZ;9-;N3)9I7t8ZTV`_SzQp3Jiy3m!b;F!f$ZwMY_b=(*Hh z1BOYrO`RBs`vu^T@LW2Xo0KfHgv_tvg_djOX$qspr@s+k`ybKY?}#ZCs|Q(C_-SMz z_h!m6unxp`^uL4`nG~Kao}lzFraQyF1C?UVF#2niZJgB6RA+L0?z8IFR%P1$K@XVl z{;gy1zb{ev&whwK2ZB(N82o=v&18%|IRn&9?zL?Xv&?!-Z)aUktL&xqe;*VWG2brU zK@ObEU2~o8=s2iK<;6^?aRXKo#E8XLQ0&m*GfsX@ilHQu!P_Lu*#L{TruUe^=awj; z|3}j|1y>es-NxzIR;OciY}>YN+vtvM+fF*}*tWT|V{~k1-<)&qU-MjsOqS zU*q`Hm!}#px-KanKrRsc*mWqR>~kpKhviX0q8!wA-#+fz#~74n5tB$adqC#Yb(yHV zH7zUxVWIw6$%JZ-j4Rg?G{J5-pP-jMV@c->e|Q#9L~_UnCf!Z`Vy1l_VC zls$Q2L+l5BCR0Uw2DUT&>5%5OFRG_t0tlVBMy?Fxo$gaP+U+&AJ%Xn}nxAJNTk+8M zP0qtlV7zWFYBCF?u(d{|)b`t{kKKXwz>azYS5-Rm0K>h}N%N%Fhby76#P;3aqWEzx z)`3k}jmH;^bY3;Apnvqmc0#zp1o?Zcs7`guTF;QC``@8c0@n4
A+W(w%PQ8XbR z{=O_excbs;O5Jve_kQ_pO6_6MDfI+S`xf}Qnb=Nli8}H2*8Aag;~20rHox)Ye!UjB zL&EomzH$@+0_cNKfd8Vn?cMN9G+E<=heE?`F3TKVc(3e%74*_f0G~xO&}4 zYPa}ctlJYT{j{5)6Vc6B+if^XkhW`=b^>-J467{$R_oibw+&y&4mPUdb8rWno#_(+ zH^W3Rj8mK`ksBKwDW2u)?fq!cwJyx$TGz9}F6l-~l^?y6;M3FBl~4_x>)Q9?`}tj{ z!;}@Fy`)bhcyopm2FF}8$d9?EZTT*GOpMo?r37HX>^c*r zd(J8v=y%qo1j4^VP#>KQAT68X>6nqm(T!@8*GSvy(6XQ&iMhA7OVFu&&TXv(;jt6L zqB*wH+ohQ{^%r2Md*4Ca(ODT>k4wTW2)V;lOmbOARa2u-Zb|VA1bs#dHWM3&tbBqi zCHtCd+W4?iUCa$Im|N#rNa@Nh_!#(CYUk-o*Gf>k${DjU9o1geuDp3}de7Q3-@wTh ze(?4#cl!2Ru5a%#zAwV$$xiqXtoGC@LR^3*1aGWv^noUHleZpdh#X2#jNtgDFguP~1lmwK3hWS|yTAJUhjx$Z~PddKB55#i&!MMLmXX2Yy z@~kwX6n6{>)v)ND;jGMHy~TX?dUlnRzPkLgB(P2>T0|c<^3v)BVnP+wUq3YFSHY4U z_0yxc6D1eoWhHTfS9%S{jsMqGDWv4B%Z$!~`2e4%52?Gh;~1IlJ;qRy5pgB8z>FzH z!s?8OLiZ>{ZI}+`S~!fm*w5d*+dp-uOSQu=eEASg3Qs?FuI{$%DL;FLw;2Di=)X;& z(x2b*{Rybh8C~8w^Xxf=#-L0zD%}f8 zMiC*|r)_U1;G6u28U5_n_CX5#wfW~%G>pwCx#nljF zL#5|-#%Lubvj%GR3_vQ|qIoLjuJxD@KWc4);-CS?whH`IWyBV|E>>O(-_Hk)k}!O7 zPO9*%K}Rc(+>@;1f)?WMP2UYfGeE1rC$pP)yv>XMcqBXuCc$>beSP2Q(JB0o@)$;1 zlE7OM|E0X!HVSW(prg3uTMW*d%J7AXzZFXJ}cY|KMP`M z9K8YtV}`FN*q&_z5ROlKL(r{e)L^x98nCi__3ylE_sqKjbw0-X$HSo0Kolbwm zTaJqtBx?!{qAEZ_7L)SYqq*w28*fO@-a;;^{k@PkqPZr0b|Zi31*>>H;mzn1>};1Q zZRm)n!g~IZpjv)Zj?QRCT*m|$mX|%HoNdP#<6gur(~P&w;2FlHdyTKC z1p4K^ZQ6Y`uQkqVmH&a4(PF12Tu`=R32Uqyf%hc1P{UZW9;gKAFAHf%;PxL=TS_Yz zQnxlwiw<8fqxx3G>?3l8d?61aFkX={2VzS?l-b#-$r!n z5hGT3>_jh|C>tA|U~&0U;P+@`wz8|FxVewz>e3~V17vlK&T(Q zkG**Y{KsbX#<~omX#?Hg$T7?)5w=VI-qIjM&f`c@CU9c-n!j;!BY6bSr)0_E1)UxD z|JHguI&x|D>i?v^U18ndBj_46Ys_yx@_`m4Nf@`A?{#b}-uJEy7<<;E*zE64QnWVA zqjYHuaVSI{<7g=*K0>K;JEn_8`R?GkcYfBD?Fp;a#}+y@H;q3DtP}Exs#iVI$es%z zVvYO`mhmZBM^1IQK!QZrl}TlwQTinwB{UcPx( zt@mvYS;g`F@&yMQH0k)cKAiv7E7tdD_dCoMv=7b7GHh4&RE3w#h)}2EHYh6T z`Iouf+AyaTa*S#)h&7LIU5MwKBAXtEzLpg`aPnYvXMZ(^TgO!*ST(JGQ{V8Ar_P(v>c@-!BEw{J*83FhT{P{Vi`NdI zl4pXcPZjlm1ZOV(Mf)@*2yeXG0aNSWuBzw$E2`Ppx51;SIhJnIFK-Wr`bgioGNLCq zUwI#Q(3nED8W-kHiBR-p&7J^(pBcGTwv?;=vTn@rJesSFsOpgn*!5oY?p__#ChJfM z`nUo;MH_OzgqEJZpZf~7cpW1In%}(dbu%y8ITTx}@kvnQC>uY`$}n}^)_a!MXqvc9 z1u?7baZfr&`}mo@s$}QUlBFa;9<-k8+?{2JtjtJ-d29-akHI=i!`*aG^mP_eAEq#4 zH$Od(W?}xAh6#S{altX?RGSvzeH^hg#TW{%!EbGXC*UpHifi~5Nd&zn+@#|nBHmk= z5vQk|B`S}#%@VdbgVw=|fovk=VIAID$=wtk1VN7R{t;`~p%wJGow|=u-s|u%>sXg1 z_%`f`ZP@X=w-Pq4pidt3xAd7{H56CD-PruT!hK$~^ z?Hglm=mB)EdRtZ3U;fCxS;G@#jU(!{pk%cm933>Hbf#zGxE+EMW}yZ*ehw!W5`z0C z4i`^Niv9iz6&*eiJ%;{M2p1i_&`1n(wDFp0YPQ$g``~LOO-JjwIA-370S_lQXPu9q zIh~hvuGKT^$9Yw{fVXBDb^j}Yw}u%3+lz2>DXkLB^J?;FW;|n%pE`if&^5&Qg%E$i_k70g!XpX-bTVxT$p71dMw!TBL1M!Y)(Z z=_wcJ40_d7%wfCkQx_pN} z&`UhX5^uBKKg%8>7DAQA>5XxX{h@57M|r1%reJ9oV%UGBvG21>9k5s$^CI{`Fw3(% zglcuEQ@1$l38;S$2ka6EE_8#cK~0yzjvz9C?qyf)TWwRIU0Frop6cs+@T>>$`Sz`& zp8K`THq(TlS-Bt$B09Sy#4pV>LxfMQUzJT+%t zML&I^6fF~pMdPEEdpr7N8Qp93@{2BOZyDf-c}4>yUZm~VQIR?1agtFLhn(>Czh%{bdIin9_r70w0%rZ|KR`|TPmhAu-ol^D23i%Hkazli z*U`P5Z;ZgVUbnz=qQENNF8BI3+5ER;z*&6HT;=0aea}tv8xn?(>v8tPw;Hhe5M|`y ze~Q|dDXq#{DazdFKj5SuhjM}6n>ioinVhM-<6a}}AJ(ggC1@2&ZcyZnE(a*69#}V1 zq8t`jGjvf*&$Ie$gaR*ux!Fa0^7uR;S}ulm@&y#wX$)9+2a;vQ)N0zlm+7auJg9~! 
zP-~kC4N`-<7+`LTJe$y>0CbOT!nM=BcW&?ywzzdvz42wyA{aoQbcQhp=}=w8grgp7 zIa*tFV!Km-0rNlmgr8nk^E?C}5u`JSHtRvVi)G6jz`%bWuz*!vo|{_y3OU^qV!!6I zp6gx@K(?T<$iN?LVl?v<{}&WDK>GU`HWO$#IR4(=b9?*awEm5;!xI2XyIidA-uO7E zC-M~hd=X`><-e(5EEP(3JTj69gBvw4v#41-*Nz)Bbcwg_EK8u8JE^by%pBQ>C3>f_=0 zd>5v8(=T6B>^KfIx~Wv|BEI7!Yw!}tM46BKOxn5He5>Ix3i27|`n`iq9Jd=0YB*Nr zkh3{2G+E(?PJLQQp0E+s4XKEK6k^m7X(bT_Bp<-uHk2;vWQwW8>i{G5L5~ZctMu*5 z&jn9HkHEbvzH`}DTDo(?w;}5%x-#>3PoVFTW^cBjr>b_Gpf&z*{%J>e{tBAe^5I2# z=VdL)Wk=^#hf{{G$HrS8BS^EulO(|KX%sY5e0c>Li2T*Cz!3OmX*V0Fx%$!YR8uZN z!h1zduR)=fYAZEmdOG-mQ#D5qxG2xv1S*2{+=x97D4m8O*Kv zAXvIICz@01w?P&LcYex@*{STk_#nBz6kF_aOz&T5+Cxs6vWbME1L{2koeY-$`3r)dgRNszH$JG!iF8}B1XZTx}!fnehRbJVonIcW>!r_^%JRvo>pHLv>bk5-BM70|KXzQniJpT68X^qRRK5~*C#Y1&sBMX z+btuw;{^M1y3ce0_k-r|cNrw%9TAeY1i`wFPFK#t31<6?tW)z5GObQ^+XJeeno<(c zS!qURo{!ojY6IxstroQlJ%ZP%ZA9TWdEePOEPVlvD5Wa4uLhlxa?dyQo01fgZE#x+ zTja1;g#1JCS#Y%#Qs+wWFfyEf#fj;vVfj%DtTmH9z zqA=CYIyz=)ZwG?pKOOXFjxwYw5wy*yrALqWTZ$^ylSzsE)8&+nqa$o*iQl9Lwc1R5 zlEg$4N9jtRnKo@##O`yCZo6!7bqq1XXy$g_k5aWotpeEC1F_qVausYiPuJe=k*g9G zRx@XR`v9*#8g&7iu0`QN09FV4asu&wsYrOpJw-x|1=KGszO~q zxl0D$91Tb_)*A%ueJ^?I{oLKZTqWT<`&@Zim;Ts>?YQIwb+o{hqp*IsIer}120PcM znDbr*930QUqFS8Gpjv$(-YZl7>A*=$|cH67o`0bicxv!XHHMhZ<~VO z4s`sDOerh#*z&YBqQVgfCza`)a`;+x%M)9eJoRfKhCcpZJLb9RWxYL_aAzY7xxfj2 zR_4pGUUc~KOr~CEo3artbii;??B^JY;JLxe^1aCz=_d}T4{&*B=(X|jl<%|jF;}k8 z3FI+YMJBg0;E)UdL-5M*@>1q}WIXrk>juI%n*S^t(9_{IR>z03-`u??Kh*#>2I z2OTZ0v-;kD09W~aCj(7Bf461rJi81BJLGh1Z}#5)vwpgIADeioq^@t%!N2$H{V2T* z|GYMS;@$4}dn=gQTde*FkAy8;3^%NfPONtXtvL|v^?x{lBSKzXAz_@Q;SMOn2 zbbD~X;?0obS_0UA26f{))OZg$hmN_wwli8e+Sj33ciLv_ZVYRGV}3O^z3do@8omad z_&#=Cy>_H`h;F{1(}Vit)O+tYSnm}19yT^n&L!J`tapq4tcq~Qq`eU#Q|Y>Z>-uJa zo`C>5_VEo0p6;hXz_#PN-_yP1=iTKx=o9iwIO*fU;mQ^Wyy!mT?d-1qT)8~hiJlI0 zu{mm_0vIx}u9JM|zg;@6mt~;dYOl7Z04+S}&=y=UCm-Xxbw5KIIoQVR#D$d2op5pos_{#s5+qgc#=>!b<^^VOVG#=t^gY|ZRnqI#r?V%-wpjx2^l)GcxZsC95IeQ`v}V8ghWpQP3K2 z1T5-!duXC$2SmP|%F}doABA6xTRz^MRE9oeG2OnB42P|J&@H*{@e^yCQ)t-!456R# zzQJXpWdA^8UoKv`W8GrslWysAsu8LCNd@7(TTqmtq{zjO;mh33RtzXVYuKLKyHz9R zHi;ClMO*zAHMI5cxMYu5mLXGL<)so;;1b3c?3uJFezWvmMd9Cxy4C!`vg*b0|dTacy)> zfiwlKU-4puN!YTw%#%io?@q+e6EpKCQsJ0!5eE-%8Ss(#81=Off1%FU?6+isar+et zN<7~&rV%S!+L{92;rjOCpM#CReF($<#rd;=&bH@;sBI!nRkN#hKuzt2N6L_U8;gtc337pc>ai78Zb<41=y zm_6@)3M)>!Am=^T3W}F4mKBl?$L4#|a6`7nHtT7VMq$7yl@SK~ zF>VFIFSnfQciHwbk4TiD!oeYAa`Za8Er4T^1XXWtZ!I% zTKJvW{QAXBOZknks+Fo2TqIQfpsxV}sXDwZ@DGqkFol~!y^-#YH`Y+&t8)grEDkJl z1LeY9XMk!i+be7ZqSk2QUNZ}7WQM(rE%K!)EKgbvu2I_cIvqeBS*oDUAMg|iM>IQM!8s3XFu@S`CHu6sck``2p5pGfOI9WZm zNO-)TX0EWZ>lISN&&aLI_e#*DX}V!WeZuUGr#>lFdW0* zmXv#RT+q+_k|JAO3nFS7dmKNW%29$Z;JVU+^pjE&rh8p%0uR!@kuu29J(zt<3oP91 ze4;p;x)}RWJq5Q-v}G8Boe@-VQKwp!p>;}@z&*m65jD<@AN?v`OJ0W6~j3-IME|n9$*h!v4mf2(|-LhhCvQb-K&5xYXU!xsuzD0xslP<=G>wW&gT+nxrJ%)Jz`{+W;25toL2avI zKH9FlNHFqrGDPh6Ya&ks60VNi@7&yZBy+)4HoCHX3uGe5r2J6HzPiFx4#VlX3ti9d zEFIAL7<2^(}Djimg+V1pQ*Mmhb z$AIy9@dhFfzYCNP2I16yVfEQ5-tk*%c5ce|2MO4XBre3<7=Q!?vfNA6ZCZU6jjYb#f-_t zdzsh$SdAU^&?oHY%c$!z(3v*xQT?mglyYH%4c;ysLf*(OJgzof;@5xdI1gf(vhp~O z#r{|9S0?E{W{auCmvayd{3Nn?YeQ{_X|!-#-ynl{>BvKfaI!(9>(cbldJ!;qS(C)h z))3Q6X3z{kdm;7s^)s5G%e{*ly29Dt?LJK*0KP5QlJ`w%+D=5tJX*2Ti=>vc5-0Z@ zIyaqtz}e@_vnEu3rOtw=%BIciix|NTyrJ$<4G?_P$<*6$Rsh(;56f9ivA&P;;ZAW- z2$^3JH&`;*?R3HXSClbV_N~l5Zqh3z)y8gCmcQ%brRCFs7@As+9IOI|4*uKmUwrPU z&`RNqF+6(X2D%4@1Ge&?ExmL{IY3O4s+KWl1>rL8wOWe2zX(z15t3R%Eey;@D*iuz zGF7k&-zQ6AqXu>_RZ-qoUmJv?+bmi;z+~=N&lkTyE z&jt?#Pdy+Z2bU9+QIT<>M4t5cJyAvMP0X*~9i1aA*+-e&movNyhUZb_RLBkh27yJ| zN|zoI8q!+4uYOb4m-rg8SHrd)e{f$m1Lg0B;vB`u2^dY 
zi6=jA(U34DdI%mOK|W$JJ;QBP+j;5)>hERE^-x+KsTEgJWI+z(BnP0XmTrV}g@c>{ zGTa8J?;SyGTcN8An$oNK8Gad%10p2+Nid#+tmuqAykp!CNr&N=tQ%n^p{pN$(C;^@ z8Vbol1z_QSmfpcm9^eRBf3#wsb}#Yi_Hsr;lssN*1@JD*cqOE=*M(%QzsdmzBT^l$ zHGeW|-qP@=!X)}LI|l@~w0RsjN8uTIniFMZLz2tKJ#CC2VG%|nATsY*jv(eA)Fj+u zP^(^yk2~Z-4^^0F$@H~#xPhy3P)As24!w$n@Q@y*%&NZH*b_h(fW3{{8q?)UD2S?* z^?_BuCdeate?tqW1sTB14f|uYnVNTK(Vkj=zlLHz{`a&aP7(1;QwbaMM5o$ z*;%vrp@?;2?_wp53ENTb7zt7r2b~2=)<{K}I(tE`2arD^I zkCk0>oGQ@OttK$@%_)9m_+rcKp!}hSe5SO?{q4>MY0#F;?7%}zBaK?Nlv;7;FFE~? zR7DV{hw6~WQk929%@0A3T;hC>aLI66fVZ1Q-3k?KD~Fj3$lpMsifEp^A|m zQbpeYpJnN}tdr)=idX^e<*))SKUaYQtNyxp{OP33wQ6vNPBK7Z}8^z zvcj#D2=w~2@C{AREWfem6#;8zyzA;J*_l_lJF9ezPVTo&u|r~7_oU#{Lkn4HaB+=G zHohB}8cSd%vEhZN({{hEg-UeYcWYko^50xt_4)y$K`AkaTfuv?G?;8^W|2PlAplA? zFWL`$M*Gjj{bU0vAA(%4*TxE0;??1&5gja@avP6TY_kO^;{1H*ZiAY_vDRg9A@^@$ zDBWhfzbEwKmKfz`4ORy-(7%~0(wJPy*kdMzpgj3>J8aS|7}=m5eIIB+Ikm)K#b*3Y zm5N)Bx=TT;M`1ydTh>f2#>u_dD^$?`_rr-DH8=Fn4)1)y;}1ZzWccB2%fD(UsNeLG zN@Ap%ysb~lzOMBQW)+=maL`3*_%P*m9~%9na;hy_;Y+Jz?X9<@k%*enZ2UO6O< z8>ykWT5UaK{*eKj3a8T+rC<8@-}C)ugX=0J+29x4Iy?HE7aeGogat(h!jHEm~@?mCXOV@f1Kua zy1v!f;?$+id^?2RlI*XLZ|PNyaG)JJD0j4mdHj)%b?ac6!YAe0=W+NtL(Jc*MiJ>> zlvn{`0jAo5>9g2IMhY^6^Qnn=(0`1Hw^(dXoJy{ECB(|gJB7g9;qtC1b$)@;X?*&A z8L%8nm5uK+ZZU;UQN5|t;;K|va)5eChp-}|}F z+}SrmmI1z!255iI70U_zf!VHdB@b4YmC?C#0(FbfE3AV_2k03X9jDnMz2x^AsW6j0 z6~%~Q6sgx+x#S+~wvJCV)CEc9w5d_2%D{--6^$y_e%DDhkl$Ryp}g(M&k?+7#q*cK zo98pY+mC}aP&{bhB9{KRe)1+y|@!3SSqS9YcR(=j5jcDSY zn)9%11|EoxRQJZTD4DhZ#3RIjvGU=-?AE$dVt z8HlW)Mv^@Y9S^Px`9H>UA%RzPHx6gs#?OV|3=?TJp^+=5`(rkMof!A_2TO;nP~!h$efE)7W{vz;bCARs%Q&LSfpo%`02fDx@al^|##7udyhYDUw1 zZC{U}!h=yP?lB#Z^0fsrtPKVZ$XOVg3Tw1ZsBv|MN{NzNhP2~Rri^bGR z7lQ?qEu(_jD8jpqYg0C;J>OH8(`_tsa&jJxE77!ToDXpda51WvEJ3r90e(wGjw)oN zyjNQdf1hFsE{lbx6jelz8*E|6fv^l_f~h0%ErsRx<-Yw^J7hF(i$0NkiHbvXl{5sE zA0PkdNQij#fC^^?#9!k&T+Xr~j600yr_Z)F2@Uw18=t40HPS?A?M6^b8fS6)3;7#D zC|0V$e^>r?I2lZ0|J3zkfY81xhJjr-UGB95vA%R)wZuf4$hGdfXl|EfMmv_F2|iy~4C5owNUOc(P1U zrV)L8fwuC9N=|4*p8kr4FRQ8YTR`qgJGE?rXoMb*7uGso*|b&*;4?rG}Z2 zS6QydB*AeYN63q!NZ}|9-7VT3p>@Y!Qh{O0V&Fu%YK0oi7>qg+mx=lriT0a%|;=D>PPm8+w?uhl(WnT-&0@57`7-nX{BSpp7$ z`}eRNmIY4;8SW( zQ@WB+Fjsn#i*77nal=^{i<^;=E#eMGwA6LXbjf^n*}%&El#ZK9c1SnqCVY$jtFtO(G*>rc|w>d~6N?HWg2+n*m@{zU<84t9-^H*}_Zz(eHvW2|gW+jL~G zy{M>euWHkN12GSu>`?Yc3U-Xx^bnk9>h~W+!>C|#Kw)ZYjux)qEN+8Qe)JHUcEd*E zvUjFQoOaK<#9SV6$Jp9liVWMufW8oYx@pGVt6YfhmFzyRaGAS}8U^eKsw%$Xt_dVA zx7KWrUs~_RiY_^RpiUK`f%krb`%3nxYK=R;pKcRtT;`Ny@)QPj;MeN^u652OZ*;06G}cFJ~?tK zbq~|5q3zb=uR4Wes+2&l$X^|mLo3nTlKIQTh(=nUBD#Vr2T_r~nF)dzC$%4%K7U9ItI`2BuZ!tv(zIWOd zz}Dl3C!_9yrPu#eun$P_uu?@K z>A6&`<5ym(S;h0Pa51rS{h&07G2>PUhlCWsw!m-c=PTiGP*@o6y)98B$w74=!{rel z#pt#IY=&EYU&&;JnDD0w4Fiv5uK!)OZ3e%{$E`f+rxuN?0l_zGodRwoeb=dPpM;NY zkY#DVAfI$>>|&Z%Dh)3P3nB65J|hq-{lVt3kaGS<&B`yMz=YtUB^-r#smrGL%9!{a zTfam<1&?0BldHklJq6Q(+S6nr-&l~VpK7K*d$KEp@xWP%K!FGOH3njUwHP1rJA?G} z-_C&ea-8*KJc?%P5Qw;8b9z}n7D+mH0##!TG{#uE1Yx(=2aq=Y=mYoqP~93ZY*4wMKFZM>V)~t zRF%LQhYDNE<=7~-lCxEQmS&z9+<{x<2$B1BTOis&!qpLBnBM{d>6AZArdGaK=zm5x zzw!A0AB+3A;MLxkM%;lq?EgQ&;$y_Dcr@0-0avrtnWr}ki8U7^`#~7uI7P@?zQQKu z1}~BrsyS>HhB^o~a(#9VkNzcGByUk_$q4R-#!YO?2#JjeP_MC(7VA#v#77~F@Z<1t za7i0=77kggYa`nnsS(CDTn#9rWd9`e!@32~M;?%L8lZyU84m|6QBM&U2n1n5qudmc zR16}{xQGKM13;?z?_waLH|Tt2H-%B>%20-Ultg*11=B#0KFRw|7@2~R?v|$<9YB|; zn0n>x8c>TTn2Cx-@g62vtttP3ZwYq!Ga@Y6u)-psW`6`OMs0_>6lGNUXBdkXE)7{E z87fmz*{FbAOYuLwE2tT)iwPgx(qc8^5n>iTvI)xxK1N%*k+*B4Xo!gdTJG8KBAFj2 z6NV#t3a$=Vy^}{c#i?StMRw#F`{F)`P)$*{OKC4%b_%kAS1qz~sH%(-q3FkAuXe6I z^U5f9%AiAmr=1u=wekp$R z@Hpr+%+dqW`M0!%6Z(G0G6RA5bQRR#JP_bx@bdX@PfB&Lf~#42Kt4@rjK~CCFu6lb 
zpO7pnS>a5x!QUijS6ZxKU!PXmAeCso*s1Dz{0!CNp=ofQjBDwE7+Y{w10`r9$c&QUVhiW2<9kRKXvFyR)qTQf&e|&& z68XQ*`h0lviiCi@twIBdH4kB94owxYyuv2>rZ^MLM(CrqSfXO4VGX6^(&Fj(c>BgO z4Ja*i;iv-(-;`R0w#*{WkMNVEHqEMiWdNl1{cKPTw`>E1>0X zbg$f&Fc0N(F}C*mV8|=wUiq=3JsNvwJhaC*&X!8+iKPCj`puFLDH3fttoTO(*ijId zVEOttF!K#4(hC1&9$w+gfx|xTOg|SD_PHfmPlEdGC5(wKgU~-<7l>nuZl3tqSXicO zG!R#1a0v4ReK_U93n5Eobu1oE!I||RK(F11tId5MQd){)n>11^j8TT_httNlX zs)A8mr`nJ8n|k4H7)H$krNF$D$xTNnL}9?MB6006k#&fq9MMBVM*vXQ1f34`S;tvj z>eSQYD8~m83tN+ztpDmcp4MWqaI?J3^w2ROKHy%67_CWi{t=1fpP{c(N$P1|f#Fi7 zyuHNMB)1<2Eg+=fUDlCH>a9<>s<7h_`;OaT|CHnR6XV{{L9#!OPKMHMgsxdgS{rYb zQd_gM$4dS-{(6*GFf!w+dm)u~JTN#JLwWF|H}%DvE4fX}b|i06*0Yey>=`nl2d-cu zHVSTTc83~-j4#1ZTtf02O6WUSHwz9e^HNHQjayRfVPIN_;ujeGnSO_*td~-{tl_MSQTd1<~5cUk0hLizZXo3ioh4 zNz9!h$!GI^(<3lv=mw9J-gB5<{WX+}Ddo2*L#nTYW8#*rUCxtDvLrOa^QyyKQ(fv|}4{8XOPY7PCy0Sz&}1c`t1$qKg?BqsW)E2u;gh zq;Ic}d})GLm&_?z1xO-;7zR?S9D&ZMq;>2k+i)()-(GOntY|A@6wkp zR;5K3D$~e=RYx{MTj`7wS5&euO(R?G+mrO(q)*%fIbJwT>-?Z-;7AdxIkM|6Jq<;f zo)Bvtla2U~U9X#8XbQi_7~soE+cwTg7^6c&xT1=B-qo>@c$SpMwBW$CMCYNnM(M~d z36u+Ls%@IJq-)J?VgC5?1P{{9kN*ot7^$HhO2Te_3skqB|1a5QTl$r2Tb23b@cOT( z=$kGSVs}!EkopztpnE@Ya}%ugrJ2IbfLLEPay?DD?qLCs&Ywmy3}YC44#j-_XwE21 z`xX$j5*1;b(7DEPxQU0Kj=#WFVA|4~_4135N+N#)v#JdZtsSj>62HJ}`mbp|_Wd2( zC0Z{1sjtu%(KsSdRlv|AzX&&7x?J)(#QPMs={1AtOCq)$Nmx zJfz@$8}?d)^?sKf`a|ZT9GeMvTWkCJIEZC@7}XLENU(hUNSF>D^p)6y zhQ|@HVN~uz!Q{DVG%ZzM3gm6N6`IB|+aLLgC180XQ#$+L^z_g$AL0^JM*>vlq3{^q zOR{y@I~z)9KV>IS1jUctXswacDMB-38Xg{JA{b~T!51YZ8F`V0&uwk>Fn&&cEJae= zQi93dB@48WbCftzi}HF)u+u5t$&w!HSVk>K&j;T8h)+UBM7MzVmb3I4^d_{)2!Z~sv*I162ixqg)KI-J9UFUuvC@krqj?K{rz?X3rZ2lNpu_a zKeZIj;5nA3>H$r@m=zq-G{k}{ULIl<%$4@csQ|KN56hCIoPewqX~yE=kC4nW-qpCW z;~nn&dpT+vAbBb|1;o{Wq+Ev;kjcgbHl;T&ou zopw3gKvCE!_R2IbB9^os)Hp&^nRf`T(1Ss#zOljyzuSjDf_#}da7c&wRnQ*BE{k1D z`AL9ZWk0Q@a@}PHey8P$o_SnNmk71=B~S{V%H?Wd47BSQw`VS8GuwhA3@>&2RU8jW zmp|*wrQxETO7YQ|*Ny74@G&x2%gx={9G(~HjBt}k2$n;A-rOUy9+fjQRSvPI?I$5| z!Kwb!EO$GWHCSraXyi*`HAv$z|{4x)W`@7P~P+*37l<^p#Obs9yu5D%qDj|!lm=sigydr_=Ec)T zqS)s7(!~VgOM$JMH+*38Xz~=tPTITxQA36#xMMclxGaAr52EiY%fxd8%LwUy#0``W z>Tz^pT>bD%q2C8+>z??p{PQKX6OjoII*#xYZ*AHfwUDuH6Ah2f6V35jJ*DD66sNE% z1_Ahi=`Ohl{G<3Ke(eS;wVyUDkaHyL%=f&d4DtfDIB-ND%I)Ex?w5Ak>5Fiov7T(egMXA=s^w|GJGh$F9O@PYXoLabA7iQSZtPu2Kj%qd{u6i%dsmj0c&!!I3p#k>h!)bdJX zAzWUp%l@v;4r8)_VtUh~0}<5y@;pOyMAITUq;2 zctbA6e=U4J&Mp>Kg*fn>FCgtleOtukh(Zh3BBkcp#R0|psLQ~_#ZD&^r2}WgJc8O~ zM+PGlA~g#I6J!@Ph7P|9ooWNLC@AWFvp?M0HH>8w{o-?32z;%M`71PoihA##^sz3+%opvYLgPuC5znZ{QU^w za2IuI6?1F<)K47@TnskE3}<^S7E{u`NRw1`7R(RWt-rZ*gS|wwt6N zC*qWr=$<-h^RGsOI&54X_Hq-xG=y=EpUKro;s^Y%g95AP+bnpj%M;`tR5H?Dg zcW*3GoFadd>~__JxaQN*M~u;v|3qI0@k`DLMXrPdD3&k;DtasX6YfG_Zf6mLpdU9IJzT0xE*VXu4#v*xQ zZAe5ScpeHJnkNH31S&P=3LG!Bm9-DKwq&C{UZ$i!no{0Ze`8`IqA2^>6kRE4L0fgf zRHb3o^8j%Uy`Vu!LzNH^Ol;{*?*|JrXy+u7wt_6&|rwuYj3uj*4>IJ=;V;xr{!j5MbI8kW(pC+qwj*ONB9g(U#T~(h$xElCKK2tYT&hA>8>&O{nL<3=BZR?;;)$W z?M)q)@x!WxEnu9I)dX%c zmx2zKIVDKKvp;bm&FZaR4p^#(Tn|BS<*rKmdrOF9&RYAQZ3|gDS3#&>HyFWO=Oyi5 zZdSocSMJHL5t#c`kuXyumuBY!EkU_q{tTTTIqk=#cf2UB-1a4bd?u)xUsWv zex$byvA=WOH=0T##3B8WL)@#KD+^B|7XNeRp>f`9?}xLSFAprMaw$I0eO)fO_Xjft z4n%uBi>ET9(k&NL;{y5cyD3{LLY7Q9bxRd{+z}MI&lo0$CE58zx0qz2W%NUuwdH%e zWf_Cz8EvXXidYTds+;d-U?}N}Y1Sm+e583$=V<=fzrv-v7M_E&lR5j9X-m2o9K5Ush2p zWUr)3HPrIc5Ahaq45g3ZOoKpekEIscCcS|P`Kj6G|R#pl@Lx zTu^U&H_ zr*cdqEoMtOQ1!bDS1eiD3&{cON5U_p2nhKdDT7)@wRl?k3^?;>E59Q-{&(Thb38b_ z--ixMSO}c9G~R$ih$;*s2mt9ty6`+Cuq6yUJ-LO{mOwVXqpv4VQf-g&>QiZCN3bdd z_}(tsdL=+hqp=1Sw0ml?3@2yDJtOlyV~%aoNG!b%lB0PAQ1il2OAk7-{~pk1LgLYB zawZd&ITDdW7hWfl5k}? 
z4N+|fEt+E~bmm4Y)vjsoebwAu8ip9Remm+pGn}Ofk+gu6K#U?z2|!bq8bkz)OqHjriiROU9xp%fW^Dqj$lOh|&HE$c?Nh9%DqVq#+2)w) zOAJg*o}5Oc!l1wr<5B2{|D!dOCwMM|wbs&E_iL>P;-&fH*frL+ zs}>l5#}pqg`>d`x0J^aYNF_~I=yLK{jlF+NJB_ilX>H6rXwz4n6E-F1^eHt5Qe=Q% zAg#u+VK?F>#w&jf_mfa@%6yh=%V`|VZ>TEYlH=v1)9`i`aZIbP@m#;Q+?crmw9oUsuj{DwEXGNO|Z6Fl`eM zaO^s^-3|ff!rU}2(rL`qybu~ORmZmHeii*y3k)Y{uzLfdTUvsWLrUZO-vOmvLoiG} z+^atQrxIvaGl$FWkw8v@GG*5k!#HQ-&JOHVo$&PXZ%T>>!+@Dxg^%E}b?SSa=yBR( zBGUhtHg!^O+rd)GSJn&Xb?bW6?4Y58I17MPEN$w^BlB|`qCvb@wY4R%sIeW}ljyEL zNMy}q3|-4MtJ1eAzmR@BqiK<|tETVE?(rat47#N4(k92t=G4pK`=+3o(pp}!-f$xu>0wDEIo&2)(Q3x{;sM)M=799l{3$rlC< zW7OQ(6HC%K=5f+Ek1eCE({N@M)g|UkG}F^l6ZnRjo^!eXD%Ymi)%A{9UqgD2p;Bwp zD9|X-D9|X-%@doAfQ`(o~lBrilyoCp<78pW` zk}(lSot%ZT+3o^jcr4vO&;r`i21^By_PURK^e6gmgaD;}bgT^^U%`^dcGb8pQ$?rB zT#wEnfi>CQH(@>iU7GWf&jUoWd=G?a;!lZ4FN-3T{SECcjZq@+c^(R&h_x4LYFD$4h+BYJT@^Ch z*ORO5mB{w7@0->E!xh3?m=fdm2m~fG(zUg-ksRDiWLcuQ3{G!#ZKS5uhD*L0)If5Yp#7J+FVyBBFsfU0JV1X8W(0o1tT20{eRCM0?a8#V{O^@E_?6d}b})XwnI zphXooFbm$VzNA!Gjct1Y;3rheNT6zY1%r=~VvIS2!%)9v$h8E+5E7V(FO8KVoa|k(6LRgv z2^jpvzC~7D1#ZG?D`cz&MUQHunt1LW?PtPSjQp-0I!nd}>OHOxO<*M;9(Q*R6nSRo zc#HBss55kbu4sE71T}^q;J#aFo_aS<1NL$f3 zXfz19S|K%MjxK1Y-avaXS63Hv{cVIsSyHvO|EszWGn3{Y%^$b(3({|#Tti&{$K-NR z#l3CK-;>rU{8h31xm@iKlO+|17_HFVT9;!Qt{_zl8iAYhaw?7T6zbZCX#g^(`QFK? zSjzcD#5zd_wKf&!Dlah;LWj|;N<=tw(@Ed8jZ4yuaKGB4?Zk1>So%UinZ707MgJ!S z2292GPG$J07$P_f(8DDPeOr63>}RNYTWvV6)*$An&|w50LWps}5Ape?v>alZ94sWX z)#pX{DYOgS$Ke94t*^AH)Ft+e$^?WZO?Zwl`m&O)MRGZhnV%-vS za}FX?=2|T<#IzKzuHVWbmC|(7q9LYCdAn@^Xpu=20GrZq#0#8;=g^|+o-vE10dMjT z!Hia*JjHacYjSR`vU$0dXUtCX)yp(=+TSJxlkXx43d}Gu7ZYQNW;*@prPkavF9*vN z>wvlXdQ=}hS8^MgbMj%VurouaZ5X@8rfn=IO26y_(fFb?BswIxP|j8UbV44wB9fYy z-u*HI71YK&5BkI0QsNoL^ik7BC)c&ad0FVyrVtUM^|U^Al1A{AG~v)NM5|KZx~VCf zI$`u$XTrQB_NIQ&5>kGsRXyyeF_e8r(-+^ZIn6m7*_tAaqSlYR31de&aZ!ur{1useM%KluWoN^9#i{~X5pzI>^B;NmkBL23^6V*lbMAir~Eng z+^KWz0bs`bN>iBTu9P2Sqb23XRhTST2g&R-xALS%P9?x`F^XkPFlV!tn|VOpYHt+? 
zmB7GP0sbH+&I5%#YE?I+(JKANdY&Dv8{t_B(HpX~3#VCbuwBwJRLA=~%Gpj*mAPuo z-V6W$3E@dZK~$f3v0OtIV+v_5mVTCwlD2un6g6JCj?(U(nB;oO3Fc0Q)SH+}qog%L zTY|&w|Hbp|%~1JyUFO5mbBzf8+Wit4KHxlQ8fDg9G$TJ)Xa7ZF=2fhB^j2hk&;#wF zcD)Q-CFUYB8z~Yj*#s2( zi-Lu=5?ZA%v}O*M-4p2{8g0YbUWN3g2ud$?a!QZhG?i`qHQ|ky5#c(%r`S;(8|S{9 zrKD(RNbl=-I&I8GfkuI=qCk~~(T6nVGN+VhJ+k6B4f5FpRKY5~rfG>3sT!je-hn((L^)O#T?_=KIKHr zDcdoFNS(zF>CW$5?CEB1gEs3%fkuHwfy*gSB``b#tVKoZH~E~D%Snt=Oc@P?-*;}A zQ_mPK2>?k_sCpNEdB$1m(*%MTyXq;Ze_aw52pchfB_^cyF>O5THAIHQq=1k>nQe7> zU*@~k9FC4&?jAo%rDHa|>F2$C{ql*~~vV&!sjuUf>K+4>aRg9O#{V#=FBkEv}z zNXIyL-%4gnE^A1W1I)j-=j#3mbDBp4x1NXg5Rp;<_iB;jQ`fVt_SC{AN+zQ8YY_VC zFWSPvyC~{r2#JoZJch(ZM6>z>eH>BGwRDdA0GtrN%W?7PfZ+Gi(nan0vCpC0f6%BF zQ4eB0(nND9>7sVg7ePp`BDLarLCS+uiR+q!Ft76XU~cl$yb)c@*i&TWblowEN~d_7 z;MA({h=-g(7AA~lAb~^wq z!B!ADdr^d&n62h643Dlk0V^@=nlWqFp}p2yFEWNk^D842BY3tfYg^I?Oo|S^mw_6` z?uROY0h4vDAlLIIb+`maH1$vrDW1+D0_j802oN})mLk`SfQaO$B7J$!%w-M&7m@Gj z>n-GT>rogaEhkmFQ|feWoT!ngEe{VO4*6sHHJI?zylcbcm@8w|_A~Z@KQHa{9C@w) z)MD)f)~W9h=6bFa6xsxkQ(IoHxgm(xL2Kp=BXkC1>mI6|F=l?QF_cA+2I^tNzSzT2 z_?BbE!rfanxCs}kUP5*=qW&`4#IV(f2dZ6%`k;17oM$x$0R%c41*iO8D>34Vz@WPIeXJ%2bJzS>hyG^X zmu<56eW_8PXPYJykvo-P|)_W8#m`Qk0OZ znmX0rQZ0&gr@4n$1fuPf}5=DH)SZL31~d|Glo=1=7Mh|5(Lwi z9%ZV|rvXx-r|P78+j}$Gj1rzvLn#H^oCo@%-0MPHyGVN{YnYpm z(j|!LsU@K+Uq)vD0f#}Jq1rh8>v|5(gtJEAj3wg?*uPWf1>oe1;fyr%w`B?PTb6m^_S+FNEeN(=dX93!{=1Gkf%xG>e7CBR*C6wenTnXfOASb zZyos5V@d4UY7SiGdLwOPg3BQxG-d78A`!EZGz@nTW{pCF6V((=IYbi0jVq0T6MRZ@ zkkm3RF?bmEkO?XZE(C)LhAQ03(GHp5UUwSE}Pu6#|i}USGU3s2S{htcE zckL@DRYN^?8dcVip7PM2hqnHL<^+NKz4qeeMMg&9X*296qPn34j5R%r#~pd4ZW^r| zH69qS*2DW*vlje5QR@9$K-=Xpv9BMdx#KfCue&vgq1$L@PDLqB`d!Of=D#4xnh>$7#Vhs zfrk7|Xa#W|Q^yt^4h{}7A4^&W<4^62_SCKDO^O5PY&1RH4cbQ&6oMlP z4By~c^TBB@e@ZlwMbO=xebK5f=C{)R+E_741Y(Vz%$xxotMPGjBsg7NAuT9wB z$%&qPEKwssU0)M)=HnRhm7ra?z>*E!?4Y!p6LE1PcTSshqd=p;3rB$}4daEQQd@AN zK%+pTK%+ocXE|) Date: Wed, 20 Jul 2022 14:06:49 +0800 Subject: [PATCH 18/21] env trainer Signed-off-by: zhaohu xing <920232796@qq.com> --- doc_zh/TUTORIAL_4_TRAINER.md | 71 +- docs/TUTORIAL_4_TRAINER.md | 76 ++ .../glm_title_generation/train_env_trainer.py | 144 +++ examples/vit_cifar100/train_env_trainer.py | 90 ++ flagai/env_args.py | 110 +++ flagai/env_trainer.py | 920 ++++++++++++++++++ flagai/trainer.py | 172 +--- 7 files changed, 1411 insertions(+), 172 deletions(-) create mode 100644 examples/glm_title_generation/train_env_trainer.py create mode 100644 examples/vit_cifar100/train_env_trainer.py create mode 100644 flagai/env_args.py create mode 100644 flagai/env_trainer.py diff --git a/doc_zh/TUTORIAL_4_TRAINER.md b/doc_zh/TUTORIAL_4_TRAINER.md index c70c7871..0e83ffb9 100644 --- a/doc_zh/TUTORIAL_4_TRAINER.md +++ b/doc_zh/TUTORIAL_4_TRAINER.md @@ -13,7 +13,7 @@ - [deepspeed](#deepspeed) - [pytorchDDP](#pytorchddp) - [deepspeed + megatron-lm](#deepspeed--megatron-lm) - +- [EnvTrainer](#EnvTrainer) Trainer 类提供了API用于多种并行框架的训练。API 支持在多个 GPU上使用Pytorch DDP/Deepspeed进行分布式训练,同时支持Megatron-LM+Deepspeed的混合并行分布式训练,同时也通过 NVIDIA Apex 实现混合精度。 ## 入门 @@ -335,3 +335,72 @@ trainer = MyTrainer( ) ``` +# EnvTrainer + +为了更容易的输入参数,我们提供了EnvTrainer代替原来的Trainer +例如: +```python +# train.py +import torch +from flagai.env_args import EnvArgs +from flagai.env_trainer import EnvTrainer + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +env_args = EnvArgs( + env_type="pytorch", + experiment_name="vit-cifar100-single_gpu", + batch_size=150, + num_gpus=1, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + 
eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_single_gpu", + save_interval=1000, + num_checkpoints=1, +) + +env_args.add_arg(arg_name="test1", default=0, type=int, ) +env_args_parse = env_args.parse_args() +trainer = EnvTrainer(env_args) +``` + +运行train.py文件时,可以通过命令行修改输入参数。 +```commandline +python train.py --batch_size=8 --epochs=10 +``` +如果你需要添加额外的参数,你可以调用这个函数: +```python +env_args.add_arg(arg_name="test1", default=0, type=int, ) +``` +然后你可以运行如下命令中的train.py文件: +```commandline +python train.py --test1=1 +``` +更多的例子可以查看 : + +1. [vit-env-trainer](https://github.com/BAAI-Open/FlagAI/tree/master/examples/vit_cifar100/train_env_trainer.py) + +2. [glm-title-generation-env-trainer](https://github.com/BAAI-Open/FlagAI/tree/master/examples/glm_title_generation/train_env_trainer.py) + + +# 使用 pytorchDDP launcher 或 deepspeed launcher 运行 +如果你使用多个GPU来训练模型,你可以直接运行train.py来调用FlagAI训练器中的启动器。 +```commandline +python train.py +``` +另外,你也可以使用pytorchDDP和deepspeed启动器来运行,例如: +### pytorchDDP +```commandline +python -m torch.distributed.launch --nproc_per_node 2 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 17750 train_env_trainer.py --not_call_launch +``` +### deepspeed +```commandline +python -m deepspeed.launcher.launch --master_addr=172.31.125.121 --master_port=17500 train.py --not_call_launch +``` diff --git a/docs/TUTORIAL_4_TRAINER.md b/docs/TUTORIAL_4_TRAINER.md index f78526b4..2ec7d235 100644 --- a/docs/TUTORIAL_4_TRAINER.md +++ b/docs/TUTORIAL_4_TRAINER.md @@ -13,6 +13,9 @@ - [deepspeed](#deepspeed) - [pytorchDDP](#pytorchddp) - [deepspeed + megatron-lm](#deepspeed--megatron-lm) +- [EnvTrainer](#EnvTrainer) + + The Trainer class provides APIs for training with multiple parallel frameworks. The API supports distributed training with Pytorch DDP/Deepspeed on multiple GPUs, as well as mixed parallel distributed training with Megatron-LM+Deepspeed, and mixed precision via NVIDIA Apex. ## Getting Started @@ -341,3 +344,76 @@ trainer = MyTrainer( model_paralle_size = 2 ) ``` + +# EnvTrainer + +To input the parameters easier, we provided the EnvTrainer to replace the original Tranier. + +Taking the code for example: +```python +# train.py +import torch +from flagai.env_args import EnvArgs +from flagai.env_trainer import EnvTrainer + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +env_args = EnvArgs( + env_type="pytorch", + experiment_name="vit-cifar100-single_gpu", + batch_size=150, + num_gpus=1, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_single_gpu", + save_interval=1000, + num_checkpoints=1, +) + +env_args.add_arg(arg_name="test1", default=0, type=int, ) +env_args_parse = env_args.parse_args() +trainer = EnvTrainer(env_args) +``` + +When you run the train.py file, you can modify the input parameters through command line. +```commandline +python train.py --batch_size=8 --epochs=10 +``` +If you need to add additional parameters, you can call the function: +```python +env_args.add_arg(arg_name="test1", default=0, type=int, ) +``` +Then you can run the train.py file in the following command: +```commandline +python train.py --test1=1 +``` + +More examples in : + +1. [vit-env-trainer](https://github.com/BAAI-Open/FlagAI/tree/master/examples/vit_cifar100/train_env_trainer.py) + +2. 
[glm-title-generation-env-trainer](https://github.com/BAAI-Open/FlagAI/tree/master/examples/glm_title_generation/train_env_trainer.py) + + +# Run with pytorchDDP launcher or deepspeed launcher +If you use multiple GPU to train models, you can run the train.py directly which to call the launcher in FlagAI Trainer. +```commandline +python train.py +``` +In addition, you also can use the pytorchDDP and deepspeed launcher to run, as example: + +### pytorchDDP +```commandline +python -m torch.distributed.launch --nproc_per_node 2 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 17750 train_env_trainer.py --not_call_launch +``` +### deepspeed +```commandline +python -m deepspeed.launcher.launch --master_addr=172.31.125.121 --master_port=17500 train.py --not_call_launch +``` \ No newline at end of file diff --git a/examples/glm_title_generation/train_env_trainer.py b/examples/glm_title_generation/train_env_trainer.py new file mode 100644 index 00000000..39c4524f --- /dev/null +++ b/examples/glm_title_generation/train_env_trainer.py @@ -0,0 +1,144 @@ +# Copyright © 2022 BAAI. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License") +import os +import numpy as np +import torch +from torch.utils.data import Dataset +from flagai.auto_model.auto_loader import AutoLoader +from flagai.env_trainer import EnvTrainer +from flagai.env_args import EnvArgs +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +# You can input all parameters by the command line. +# For example: python train_env_trainer.py --epochs=300 --batch_size=4 --env_type=pytorch +env_args = EnvArgs() +trainer = EnvTrainer(env_args) + +cur_dir = os.path.dirname(os.path.abspath(__file__)) +src_dir = cur_dir + '/data/train.src' +tgt_dir = cur_dir + '/data/train.tgt' + +maxlen = 256 +auto_loader = AutoLoader("lm", + model_name="GLM-large-ch", + model_dir="./state_dict/") +model = auto_loader.get_model() +tokenizer = auto_loader.get_tokenizer() + +def read_file(): + src = [] + tgt = [] + + with open(src_dir, 'r', encoding='utf-8') as f: + lines = f.readlines() + for line in lines: + src.append(line.strip('\n').lower()) + + with open(tgt_dir, 'r', encoding='utf-8') as f: + lines = f.readlines() + for line in lines: + tgt.append(line.strip('\n').lower()) + + return src, tgt + + +class GLMSeq2seqDataset(Dataset): + + def __init__(self, + sents_src, + sents_tgt, + tokenizer, + max_src_length=300, + max_tgt_length=200): + super(GLMSeq2seqDataset, self).__init__() + self.sents_src = sents_src + self.sents_tgt = sents_tgt + self.tokenizer = tokenizer + self.max_src_length = max_src_length + self.max_tgt_length = max_tgt_length + self.no_block_position = False + + def __getitem__(self, i): + source_text = self.sents_src[i] + target_text = self.sents_tgt[i] + data = self.tokenizer.encode_plus(source_text, target_text) + + return data + + def __len__(self): + + return len(self.sents_src) + + +class GLMPoetryDynamicCollateFN(): #padding process in each batch + + def __init__(self, pad_id): + self.pad_id = pad_id + + def pad_token(self, tokens, max_length): + pad_len = max_length - len(tokens) + tokens += [self.pad_id] * pad_len + return tokens + + def pad_position_ids(self, position_ids, max_length): + pad_len = max_length - len(position_ids[0]) + position_ids[0] += [len(position_ids[0]) + x for x in range(pad_len)] + position_ids[1] += [1] * pad_len + return position_ids + + def pad_loss_mask(self, loss_mask, max_length): + pad_len = max_length - len(loss_mask) + loss_mask += [0] * 
pad_len + return loss_mask + + def __call__(self, batch): + input_ids = [data["input_ids"] for data in batch] + target_ids = [data["target_ids"] for data in batch] + position_ids = [data["position_ids"] for data in batch] + attention_mask = [data['attention_mask'] for data in batch] + loss_mask = [data['loss_mask'] for data in batch] + + max_length = max([len(t) for t in input_ids]) + for i in range(len(input_ids)): + input_ids[i] = self.pad_token(input_ids[i], max_length) + target_ids[i] = self.pad_token(target_ids[i], max_length) + position_ids[i] = self.pad_position_ids(position_ids[i], + max_length) + loss_mask[i] = self.pad_loss_mask(loss_mask[i], max_length) + return { + 'input_ids': torch.LongTensor(input_ids), + 'labels': torch.LongTensor(target_ids), + 'position_ids': torch.LongTensor(position_ids), + 'attention_mask': torch.LongTensor(attention_mask), + 'loss_mask': torch.LongTensor(loss_mask) + } + + +sents_src, sents_tgt = read_file() +my_collate_fn = GLMPoetryDynamicCollateFN( + pad_id=tokenizer.get_command('pad').Id) + +data_len = len(sents_tgt) +train_size = int(data_len * 0.8) +train_src = sents_src[:train_size][:2000] +train_tgt = sents_tgt[:train_size][:2000] + +val_src = sents_src[train_size:] +val_tgt = sents_tgt[train_size:] + +train_dataset = GLMSeq2seqDataset(train_src, + train_tgt, + tokenizer=tokenizer, + max_src_length=300, + max_tgt_length=200) +val_dataset = GLMSeq2seqDataset(val_src, + val_tgt, + tokenizer=tokenizer, + max_src_length=300, + max_tgt_length=200) + +trainer.train(model, + train_dataset=train_dataset, + valid_dataset=val_dataset, + collate_fn=my_collate_fn) diff --git a/examples/vit_cifar100/train_env_trainer.py b/examples/vit_cifar100/train_env_trainer.py new file mode 100644 index 00000000..fcb153cb --- /dev/null +++ b/examples/vit_cifar100/train_env_trainer.py @@ -0,0 +1,90 @@ +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.env_trainer import EnvTrainer +from flagai.auto_model.auto_loader import AutoLoader +import argparse + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +from flagai.env_args import EnvArgs + +env_args = EnvArgs( + env_type="pytorch", + experiment_name="vit-cifar100-single_gpu", + batch_size=64, + num_gpus=1, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=100, + eval_interval=1000, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_single_gpu", + save_interval=1000, + num_checkpoints=1, +) + +env_args.add_arg(arg_name="test_args", default=0, type=int, ) +env_args = env_args.parse_args() +trainer = EnvTrainer(env_args) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = 
torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + optimizer=optimizer, + lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/flagai/env_args.py b/flagai/env_args.py new file mode 100644 index 00000000..49c5ce29 --- /dev/null +++ b/flagai/env_args.py @@ -0,0 +1,110 @@ +import argparse + +def save_best(best_score, eval_dict): + return best_score if best_score < eval_dict['loss'] else eval_dict['loss'] + +def str2bool(v): + if isinstance(v,bool): + return v + if v == 'True': + return True + if v == 'False': + return False + +class EnvArgs: + def __init__(self, + env_type="pytorch", + experiment_name="test_experiment", + epochs=1, + batch_size=1, + lr=1e-5, + seed=1234, + + fp16=False, + pytorch_device="cpu", + clip_grad=1.0, + checkpoint_activations=False, + gradient_accumulation_steps=1, + weight_decay=1e-5, + warm_up=0.1, + + log_interval=100, + eval_interval=1000, + save_interval=1000, + + save_dir=None, + load_dir=None, + save_optim=False, # save current optimizer.') + save_rng=False, # save current rng state.') + load_type='latest', # latest, best + load_optim=False, # not load optimizer when loading checkpoint.') + load_rng=False, + tensorboard_dir="tensorboard_summary", + + # distribute settings + deepspeed_activation_checkpointing=False, + num_checkpoints=1, + master_ip='localhost', + master_port=17750, + num_nodes=1, + num_gpus=1, + hostfile="./hostfile", + deepspeed_config="./deepspeed.json", + model_parallel_size=1, + training_script="train.py", + ): + + self.parser = argparse.ArgumentParser(description='Env args parser') + self.parser.add_argument('--env_type', default=env_type, help='the model will be trained') + self.parser.add_argument('--experiment_name', default=experiment_name, help='start training from saved checkpoint') + self.parser.add_argument('--epochs', default=epochs, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--batch_size', default=batch_size, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--lr', default=lr, type=float, help='start training from saved checkpoint') + self.parser.add_argument('--seed', default=seed, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--fp16', default=fp16, type=str2bool, help='start training from saved checkpoint') + self.parser.add_argument('--pytorch_device', default=pytorch_device, help='start training from saved checkpoint') + self.parser.add_argument('--clip_grad', default=clip_grad, type=float, help='start training from saved checkpoint') + self.parser.add_argument('--checkpoint_activations', default=checkpoint_activations, type=str2bool, help='start training from saved checkpoint') + 
self.parser.add_argument('--gradient_accumulation_steps', default=gradient_accumulation_steps, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--weight_decay', default=weight_decay, type=float, help='start training from saved checkpoint') + self.parser.add_argument('--warm_up', default=warm_up, type=float, help='start training from saved checkpoint') + self.parser.add_argument('--log_interval', default=log_interval, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--eval_interval', default=eval_interval, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--save_interval', default=save_interval, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--save_dir', default=save_dir, help='start training from saved checkpoint') + self.parser.add_argument('--load_dir', default=load_dir, help='start training from saved checkpoint') + self.parser.add_argument('--save_optim', default=save_optim, type=str2bool, help='start training from saved checkpoint') + self.parser.add_argument('--save_rng', default=save_rng, type=str2bool,help='start training from saved checkpoint') + self.parser.add_argument('--load_type', default=load_type, type=str2bool,help='start training from saved checkpoint') + self.parser.add_argument('--load_optim', default=load_optim, type=str2bool,help='start training from saved checkpoint') + self.parser.add_argument('--load_rng', default=load_rng, type=str2bool, help='start training from saved checkpoint') + self.parser.add_argument('--tensorboard_dir', default=tensorboard_dir, help='start training from saved checkpoint') + self.parser.add_argument('--deepspeed_activation_checkpointing', default=deepspeed_activation_checkpointing, help='start training from saved checkpoint') + self.parser.add_argument('--num_checkpoints', default=num_checkpoints, help='start training from saved checkpoint') + self.parser.add_argument('--deepspeed_config', default=deepspeed_config, help='start training from saved checkpoint') + self.parser.add_argument('--model_parallel_size', default=model_parallel_size, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--training_script', default=training_script, help='start training from saved checkpoint') + + self.parser.add_argument('--hostfile', default=hostfile, help='start training from saved checkpoint') + self.parser.add_argument('--master_ip', default=master_ip, help='start training from saved checkpoint') + self.parser.add_argument('--master_port', default=master_port, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--num_nodes', default=num_nodes, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--num_gpus', default=num_gpus, type=int, help='start training from saved checkpoint') + self.parser.add_argument('--not_call_launch', action="store_true", help='start training from saved checkpoint') + self.parser.add_argument('--local_rank', default=0, type=int, help='start training from saved checkpoint') + + def add_arg(self, arg_name, default=None, type=str, help="", store_true=False): + if store_true: + self.parser.add_argument(f"--{arg_name}", default=default, type=type, action="store_true", help=help) + else : + self.parser.add_argument(f"--{arg_name}", default=default, type=type, help=help) + + + def parse_args(self): + args = self.parser.parse_args() + if args.env_type == "pytorch": + # not need the "not_call_launch" 
parameter + args.not_call_launch = True + + return args + diff --git a/flagai/env_trainer.py b/flagai/env_trainer.py new file mode 100644 index 00000000..c7ef8678 --- /dev/null +++ b/flagai/env_trainer.py @@ -0,0 +1,920 @@ +# Copyright © 2022 BAAI. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License") +# Arguments for training +try: + import deepspeed.utils + import deepspeed +except: + pass +try: + from flagai import mpu +except Exception: + pass + +import torch +import argparse +import os +import random +import numpy as np +import torch.distributed as dist +from flagai.logger import log_dist +from torch.utils.tensorboard import SummaryWriter +from flagai.utils import load_checkpoint, save_checkpoint, load_optim, load_rng +from flagai.schedulers import AnnealingLR +from flagai.optimizers import get_optimizer, get_optimizer_param_groups +from flagai.fp16 import FP16_Module +from flagai.utils import Timers +from flagai.launch import launch_dist +from torch.nn.parallel import DistributedDataParallel as DDP +from flagai.fp16 import DynamicLossScaler +""" +The Trainer class, to easily train a pytorh model on a new task. +""" +def save_best(best_score, eval_dict): + return best_score if best_score < eval_dict['loss'] else eval_dict['loss'] + +def get_args_list(env_args): + not_need_to_launch_args = ["not_call_launch", "local_rank", "master_port", "master_ip", "hostfile", "num_gpus", "num_nodes"] + args_list = [] + args = dir(env_args) + for arg in args: + if not arg.startswith("__") and not arg.startswith("_") and arg not in not_need_to_launch_args: + args_list.append(f"--{arg}") + args_list.append(str(getattr(env_args, arg))) + + print(f"args list is {args_list}") + return args_list + +class EnvTrainer(): + def __init__(self, + env_args, + ): + self.timers = Timers() + self.env_type = env_args.env_type + if self.env_type not in set( + ["deepspeed", 'pytorch', 'pytorchDDP', 'deepspeed+mpu']): + raise Exception("Not supported env_type!!!!") + os.environ["ENV_TYPE"] = env_args.env_type + self.experiment_name = env_args.experiment_name + self.batch_size = env_args.batch_size + self.gradient_accumulation_steps = env_args.gradient_accumulation_steps + self.lr = env_args.lr + self.weight_decay = env_args.weight_decay + self.epochs = env_args.epochs + self.clip_grad = env_args.clip_grad + self.seed = env_args.seed + self.fp16 = env_args.fp16 + self.warm_up = env_args.warm_up + + self.log_interval = env_args.log_interval + self.eval_interval = env_args.eval_interval + + # model checkpointing + self.save_dir = env_args.save_dir + self.save_interval = env_args.save_interval + self.save_optim = env_args.save_optim + self.save_rng = env_args.save_rng + self.save_best = save_best + self.load_dir = env_args.load_dir + self.load_type = env_args.load_type + self.load_optim = env_args.load_optim + self.load_rng = env_args.load_rng + self.tb_writer = SummaryWriter( + os.path.join(env_args.tensorboard_dir, env_args.experiment_name)) + + # distribute settings + self.pytorch_device = env_args.pytorch_device + self.checkpoint_activations = env_args.checkpoint_activations + self.deepspeed_activation_checkpointing = env_args.deepspeed_activation_checkpointing + self.num_checkpoints = env_args.num_checkpoints + self.env_type = env_args.env_type + self.not_call_launch = env_args.not_call_launch + self.deepspeed_config = env_args.deepspeed_config + self.model_parallel_size = env_args.model_parallel_size + self.num_nodes = env_args.num_nodes + self.num_gpus = env_args.num_gpus + 
self.master_ip = env_args.master_ip + self.master_port = env_args.master_port + self.hostfile = env_args.hostfile + self.training_script = env_args.training_script + + if 'deepspeed' in self.env_type or self.env_type == 'pytorchDDP': + training_paras = get_args_list(env_args) + self.rank = int(os.environ.get('RANK', 0)) + self.world_size = int(os.environ.get('WORLD_SIZE', 1)) + self.local_rank = env_args.local_rank + log_dist("not_call_launch: {}".format(self.not_call_launch)) + # Implement for AutoLaunch + # >>> python train.py # will call get_dist_args() + # `--not_call_launch` is default 'False' + # So, if `env_type` is `pytorch`, the `Trainer` will not call lanch_dist() + # Otherwise, the lanch_dist() is called to launch 'train.py' with `--not_call_launch` + if not self.not_call_launch: + launch_dist(launcher='distributed_deepspeed' if 'deepspeed' + in self.env_type else 'distributed_torch', + num_nodes=self.num_nodes, + gpus_per_node=self.num_gpus, + master_addr=self.master_ip, + master_port=self.master_port, + hostfile=self.hostfile, + training_script=self.training_script, + training_paras=training_paras) + os._exit(1) + self.initialize_distributed() + + def set_seed(self, seed=1234): + """Set random seed for reproducability.""" + if seed is not None and seed > 0: + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if self.env_type == 'deepspeed+mpu': + mpu.model_parallel_cuda_manual_seed(seed) + + def initialize_distributed(self): + """Initialize torch.distributed.""" + if self.env_type == 'pytorch': + log_dist('No need to initialize') + return + if self.env_type in ['deepspeed', 'deepspeed+mpu', 'pytorchDDP']: + torch.backends.cudnn.enabled = False + # Manually set the device ids. + device = self.rank % torch.cuda.device_count() + if self.local_rank is not None: + device = self.local_rank + torch.cuda.set_device(device) + # Call the init process + init_method = 'tcp://' + self.master_ip = os.getenv('MASTER_ADDR', 'localhost') + self.master_port = os.getenv('MASTER_PORT', '6000') + + init_method += self.master_ip + ':' + self.master_port + log_dist( + "init method {}, rank {}, device {}, local_rank {}.".format( + init_method, self.rank, device, self.local_rank)) + torch.distributed.init_process_group( + backend='nccl', # gloo + world_size=self.world_size, + rank=self.rank, + init_method=init_method) + # Set the model-parallel / data-parallel communicators. + if self.env_type == 'deepspeed+mpu': + os.environ["MODEL_PARALLEL_SIZE"] = str(self.model_parallel_size) + try: + mpu.initialize_model_parallel(self.model_parallel_size) + if 'deepspeed' in self.env_type and self.deepspeed_activation_checkpointing: + deepspeed.checkpointing.configure( + mpu, + deepspeed_config=self.deepspeed_config, + num_checkpoints=self.num_checkpoints) + mpu.checkpoint = deepspeed.checkpointing.checkpoint + mpu.get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker + mpu.model_parallel_cuda_manual_seed = deepspeed.checkpointing.model_parallel_cuda_manual_seed + except Exception as e: + log_dist(e) + log_dist("No mpu is installed! 
No model parallel is used") + log_dist("initialize eviroments succesed") + self.set_seed(self.seed) + + def get_dataloader(self, dataset, collate_fn, shuffle=False): + """ initilize the dataloader""" + if dataset is None: + return None + if self.env_type == 'pytorch': + return torch.utils.data.DataLoader(dataset, + batch_size=self.batch_size, + collate_fn=collate_fn, + num_workers=4, + prefetch_factor=4, + pin_memory=True, + drop_last=False, + shuffle=shuffle) + else: + if self.env_type == 'deepspeed+mpu': + # num_replicas = self.world_size // mpu.get_model_parallel_world_size( + # ) + # rank = self.rank // mpu.get_model_parallel_world_size() + # rank = mpu.get_model_parallel_rank() + rank = mpu.get_model_parallel_src_rank() + print("*"*80) + print("local rank",self.rank, "model rank", rank) + print("*"*80) + sampler = torch.utils.data.distributed.DistributedSampler( + dataset, + # num_replicas=num_replicas, + rank=rank, + shuffle=shuffle) + else: + num_replicas = self.world_size + rank = self.rank + sampler = torch.utils.data.distributed.DistributedSampler( + dataset, rank=rank, shuffle=shuffle) + return torch.utils.data.DataLoader(dataset, + batch_size=self.batch_size, + sampler=sampler, + num_workers=4, + drop_last=False, + pin_memory=False, + prefetch_factor=4, + collate_fn=collate_fn) + + def train(self, + model=None, + optimizer=None, + lr_scheduler=None, + train_dataset=None, + valid_dataset=None, + metric_methods=[], + collate_fn=None): + """Training Loops""" + """ + Trainer is a simple but unifed training and eval loop for PyTorch/Deepspeed/Megatron-LM. + Args: + model (`torch.nn.Module`, *optional*): + The model to train, evaluate or use for predictions. + args ([`env_type`]): + The enviroment type for training. Will default to 'pytorch'. + env_type: `pytorch`, `pytorchDDP`, `deepspeed`, `deepspeed+mpu` + pytorch: single node cpu/gpu + pytorchDDP: single-/multi- node gpu + deepspeed: single-/multi- node gpu + deepspeed+mpu: single-/multi- node gpu + train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.DataLoader`, *optional*): + The dataset to use for training. + If it is an `Dataset`, we will create a `DataLoader` with the provided `Dataset` and `collate_fn' for the selected `env_type`. + `Dataset` is prefred to iterally return a sample as followings, + >>> {'text': 'I like big model.', 'label': 'positive'} + If it is an `DataLoader`, we will directly use it. + Important: Columns not accepted by the `model.forward()` method are automatically droped. + eval_dataset (`torch.utils.data.Dataset` or `torch.utils.data.DataLoader`, *optional*): + The dataset to use for evaluation. Similar to `train_dataset`. + collate_fn (`DataCollator` or `function`, *optional*): + The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. + metrics (`function`, *optional*): + The function that will be used to compute metrics at evaluation. Must return + a dictionary string to metric values. + optimizers (`torch.optim.Optimizer`, *optional*): A optimizer to use. Will default to an instance of + [`AdamW`] on your model. + lr_scheduler (`torch.optim.lr_scheduler`, *optional*): A lr_scheduler to use. Will default to an instance of + [`AnnealingLR`]. 
+ """ + if not isinstance(train_dataset, torch.utils.data.DataLoader): + train_dataloader = self.get_dataloader(train_dataset, collate_fn, + True) + else: + train_dataloader = train_dataset + + if not isinstance(valid_dataset, torch.utils.data.DataLoader): + + valid_dataloader = self.get_dataloader(valid_dataset, collate_fn, + False) + else: + valid_dataloader = valid_dataset + + if self.load_dir: + log_dist("loading checkpoints form {}".format(self.load_dir)) + sd = load_checkpoint(model, + load_dir=self.load_dir, + load_type=self.load_type) + """Train the model.""" + # Turn on training mode which enables dropout. + model.train() + if self.fp16 and self.env_type == 'pytorchDDP': + log_dist( + "Warning: The pytorchDDP plus FP16 may not working togather!!!" + ) + if self.fp16: + model.half() + if self.checkpoint_activations: + model.config[ + 'checkpoint_activations'] = self.checkpoint_activations + + if self.env_type == 'pytorchDDP': + model.to(torch.device('cuda', self.local_rank)) + model = DDP(model, + device_ids=[self.local_rank], + find_unused_parameters=True) + + elif self.env_type == 'pytorch': + model.to(self.pytorch_device) + else: + model.cuda(torch.device('cuda', self.local_rank)) + if self.fp16: + model = FP16_Module(model) + + param_groups = get_optimizer_param_groups(model) + + if hasattr(param_groups[0], 'params'): + # for T5 Model + param_groups = param_groups[0]['params'] + + if optimizer is None and 'deepspeed' not in self.env_type and self.epochs > 0: + optimizer = get_optimizer( + param_groups=param_groups, + lr=self.lr, + weight_decay=self.weight_decay, + cpu_optimizer=False, + cpu_torch_adam=False, + fp16=self.fp16, + optimizer='adam') # if not self.fp16 else 'adafactor') + + if lr_scheduler == None and optimizer != None and self.warm_up > 0 and 'deepspeed' not in self.env_type and self.epochs > 0: + + lr_scheduler = AnnealingLR( + optimizer, + start_lr=self.lr, + warmup_iter=int(self.warm_up * self.epochs * + len(train_dataloader)), + decay_style='linear', + num_iters=self.epochs * len(train_dataloader)) + + if 'deepspeed' in self.env_type: + # initialize the deepspeed + model, optimizer, _, lr_scheduler = deepspeed.initialize( + model=model, + # if huggingface t5: param_groups[0]['params'] + model_parameters=param_groups, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + mpu=mpu if self.env_type == 'deepspeed+mpu' else None, + config=self.deepspeed_config, + dist_init_required=True) + if self.load_optim: + print(self.load_optim) + print(type(self.load_optim)) + load_optim(optimizer, lr_scheduler, sd) + if self.load_rng: + load_rng(sd) + # Tracking loss. + total_lm_loss = 0.0 + self.iteration = 0 + self.accumulate_count = 0 + best_iteration = 0 + best_loss = float('inf') + # For each remaining epoch + self.timers('interval time').start() + # self.eval_metrics = eval_metrics + # self.do_eval = valid_dataset!=None + self.metric_methods = metric_methods + best_score = float('inf') + if len(self.metric_methods) > 0: + best_score = -best_score + + for epoch in range(self.epochs): + # log_dist('working on epoch {} ...'.format(epoch), [0]) + # Set the data loader epoch to shuffle the index iterator. + # if self.env_type == 'deepspeed+mpu': + # if mpu.get_model_parallel_rank() == 0: + # train_dataloader.sampler.set_epoch(epoch + self.world_size) + if self.env_type != 'pytorch': + train_dataloader.sampler.set_epoch(epoch + self.world_size) + + # For all the batches in the dataset. + for iteration_, batch in enumerate(train_dataloader): + # Train for one step. 
+ if 'deepspeed' in self.env_type or self.env_type == 'pytorchDDP': + batch = { + x: batch[x].to(torch.device('cuda', self.local_rank)) + for x in batch if x not in ['uid', 'meta', 'mode'] + } + elif 'pytorch' == self.env_type: + batch = { + x: batch[x].to(torch.device(self.pytorch_device)) + for x in batch if x not in ['uid', 'meta', 'mode'] + } + if self.env_type == 'pytorchDDP': + lm_loss, _ = self.train_step_pytorchDDP( + batch, model, optimizer, lr_scheduler) + dist.barrier() + + elif self.env_type == 'pytorch': + lm_loss, _ = self.train_step_pytorch( + batch, model, optimizer, lr_scheduler) + else: + lm_loss, _ = self.train_step_deepspeed(batch, + model, + optimizer, + lr_scheduler, + single_step=True) + dist.barrier() + if lm_loss is not None: + total_lm_loss += lm_loss.data.detach().float() + + # Logging. + if (self.iteration + 1) % self.log_interval == 0: + if optimizer is not None: + learning_rate = optimizer.param_groups[0]['lr'] + else: + learning_rate = model.optimizer.param_groups[0]['lr'] + avg_lm_loss = total_lm_loss.item() / self.log_interval + elapsed_time = self.timers('interval time').elapsed() + self.report_iteration_metrics( + optimizer, learning_rate, avg_lm_loss, + elapsed_time * 1000.0 / self.log_interval, + self.iteration + 1, + self.epochs * len(train_dataloader)) + self.tb_writer.add_scalar('train/loss', avg_lm_loss, + self.iteration + 1) + self.tb_writer.add_scalar('lr', learning_rate, + self.iteration + 1) + total_lm_loss = 0.0 + # Evaluation #todo add train_args + if self.eval_interval and ( + self.iteration + 1 + ) % self.eval_interval == 0 and valid_dataloader is not None: + self.timers.log(['forward', 'backward', 'optimizer'], + normalizer=self.eval_interval) + prefix = 'epoch {}'.format(epoch) + eval_dict = self.evaluate_and_print_results( + prefix=prefix, + data_loader=valid_dataloader, + model=model, + forward_step_func=self.forward_step, + verbose=False) + if eval_dict is not None: + eval_loss = eval_dict.get("loss", 0.0) + self.tb_writer.add_scalar('eval/loss', eval_loss, + self.iteration + 1) + for i in range(len(self.metric_methods)): + name = self.metric_methods[i][0] + score = eval_dict.get(name, 0) + self.tb_writer.add_scalar( + 'eval_metrics/%s' % (name), score, + self.iteration + 1) + + if self.save_best is not None and self.save_best(best_score, eval_dict) != best_score: + best_score = self.save_best(best_score, eval_dict) + log_dist("saving best model with score {:.4f}".format(best_score)) + best_iteration = self.iteration + save_checkpoint(self.iteration+1, + best_iteration+1, + + model, + optimizer, + lr_scheduler, + save_optim=self.save_optim, + save_dir=self.save_dir, + save_rng=self.save_rng) + if self.save_dir and (self.iteration + 1) % self.save_interval == 0 and \ + self.iteration != best_iteration: + save_checkpoint(self.iteration+1, + best_iteration+1, + model, + optimizer, + lr_scheduler, + save_optim=self.save_optim, + save_dir=self.save_dir, + save_rng=self.save_rng) + self.iteration += 1 + + # Checkpointing at the end of each epoch. 
+ + # Evaluation #todo add train_args + if ((self.epochs == 0) or (self.eval_interval and + (self.iteration ) % self.eval_interval != 0) + ) and valid_dataloader is not None: + prefix = 'final evaluate' + self.evaluate_and_print_results( + prefix=prefix, + data_loader=valid_dataloader, + model=model, + forward_step_func=self.forward_step, + verbose=False) + + def train_step_pytorch(self, + data, + model, + optimizer, + lr_scheduler, + mems=None): + """Single training step.""" + # Forward model for one step. + self.timers('forward').start() + step_output = self.forward_step(data, model, mems) + self.timers('forward').stop() + # accumulate gradients + lm_loss = step_output['loss'] + lm_loss /= self.gradient_accumulation_steps + reduced_loss = lm_loss.detach().clone().view(1) + # skip the iter while loss has NAN + if not DynamicLossScaler._has_inf_or_nan(reduced_loss): + # Calculate gradients, reduce across processes, and clip. + self.timers('backward').start() + if self.fp16 and hasattr(optimizer, 'backward'): + optimizer.backward(lm_loss, + update_master_grads=False, + retain_graph=True) + else: + lm_loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), self.clip_grad) + self.timers('backward').stop() + + # Update parameters. + self.timers('optimizer').start() + if (self.accumulate_count + + 1) % self.gradient_accumulation_steps == 0: + if self.fp16: + # optimizer.update_master_grads() + optimizer.step() + optimizer.zero_grad() + else: + optimizer.step() + # optimizer.zero_grad() + self.accumulate_count = 0 + else: + self.accumulate_count += 1 + if lr_scheduler: + lr_scheduler.step() + self.timers('optimizer').stop() + + else: + log_dist("Found NaN loss, skip backward", [0]) + del lm_loss, reduced_loss + mems = None + reduced_loss = None + return reduced_loss, mems + + def train_step_pytorchDDP(self, + data, + model, + optimizer, + lr_scheduler, + mems=None): + """Single training step.""" + + from contextlib import nullcontext + if self.fp16: + no_sync = model.module.no_sync + else: + no_sync = model.no_sync + + mycontext = no_sync if ( + self.accumulate_count + + 1) != self.gradient_accumulation_steps else nullcontext + + with mycontext(): + # Forward model for one step. + self.timers('forward').start() + step_output = self.forward_step(data, model, mems) + self.timers('forward').stop() + + # accumulate gradients + lm_loss = step_output['loss'] + lm_loss /= self.gradient_accumulation_steps + # reduce sum of losses + reduced_loss = lm_loss.detach().clone().view(1) + # dist.all_reduce(reduced_loss.data) + # reduced_loss.data = reduced_loss.data / self.world_size + + # skip the iter while loss has NAN + if not DynamicLossScaler._has_inf_or_nan(reduced_loss): + # Calculate gradients, reduce across processes, and clip. + self.timers('backward').start() + + if self.fp16 and hasattr(optimizer, 'backward'): + log_dist("The optimizer has backward function") + optimizer.backward(lm_loss, + update_master_grads=False, + retain_graph=True) + else: + lm_loss.backward() + + torch.nn.utils.clip_grad_norm_(model.module.parameters(), + self.clip_grad) + self.timers('backward').stop() + + # Update parameters. 
+ self.timers('optimizer').start() + if (self.accumulate_count + + 1) % self.gradient_accumulation_steps == 0: + if self.fp16: + optimizer.update_master_grads() + optimizer.step() + optimizer.zero_grad() + else: + optimizer.step() + # model.zero_grad() + + self.accumulate_count = 0 + else: + self.accumulate_count += 1 + if lr_scheduler: + lr_scheduler.step() + self.timers('optimizer').stop() + dist.barrier() + + else: + log_dist("Found NaN loss, skip backward", [0]) + del lm_loss, reduced_loss + mems = None + reduced_loss = None + return reduced_loss, mems + + def train_step_deepspeed(self, + data, + model, + optimizer, + lr_scheduler, + mems=None, + single_step=False): + """Single training step.""" + + # Forward model for one step. + if (self.accumulate_count + 1) % self.gradient_accumulation_steps == 0: + model.set_gradient_accumulation_boundary(True) + else: + model.set_gradient_accumulation_boundary(False) + self.timers('forward').start() + step_output = self.forward_step(data, model, mems) + self.timers('forward').stop() + lm_loss = step_output['loss'] + reduced_loss = lm_loss.detach().clone().view(1) + + if self.env_type == 'deepspeed+mpu': + torch.distributed.all_reduce(reduced_loss.data, + group=mpu.get_data_parallel_group()) + elif self.env_type == 'deepspeed': + torch.distributed.all_reduce(reduced_loss.data) + if 'deepspeed' in self.env_type: + reduced_loss.data = reduced_loss.data / \ + (self.world_size / self.model_parallel_size) + if not DynamicLossScaler._has_inf_or_nan(reduced_loss): + # Calculate gradients, reduce across processes, and clip. + self.timers('backward').start() + model.backward(lm_loss) + self.timers('backward').stop() + # Update parameters. + self.timers('optimizer').start() + model.step() + if lr_scheduler: + lr_scheduler.step() + self.timers('optimizer').stop() + if (self.accumulate_count + + 1) % self.gradient_accumulation_steps == 0: + self.accumulate_count = 0 + else: + self.accumulate_count += 1 + dist.barrier() + else: + log_dist("Found NaN loss, skip backward", [0]) + del lm_loss, reduced_loss + mems = [] + reduced_loss = None + return reduced_loss, mems + + def forward_step(self, data, model, mems=None): + """Simple forward step. """ + data['mems'] = mems + model_output = model(**data) + logits = model_output['logits'] + loss = model_output['loss'] + hidden_states = None + if 'hidden_states' in model_output: + hidden_states = model_output['hidden_states'] + elif 'encoder_hidden_states' in model_output: + hidden_states = model_output['encoder_hidden_states'] + + return { + 'loss': loss, + 'hidden_states': hidden_states, + 'logits': logits.contiguous().float() + } + + def backward_step(self, optimizer, model, lm_loss): + """Backward step.""" + + # Total loss. + loss = lm_loss + # Backward pass. + # if self.train_args.deepspeed: + if 'deepspeed' in self.env_type: + model.backward(loss) + else: + # optimizer.zero_grad() + if hasattr(optimizer, 'backward'): + optimizer.backward(loss, update_master_grads=False) + else: + loss.backward() + if self.env_type == 'pytorchDDP': + optimizer.step() + + # if self.train_args.deepspeed or self.train_args.DDP_impl == 'torch': + self.timers('allreduce').reset() + if self.env_type == 'pytorch': + torch.nn.utils.clip_grad_norm_(model.parameters(), self.clip_grad) + return lm_loss + + def _gather_all(self, input_): + + # Bypass the function if we are using only 1 GPU. + if torch.distributed.get_world_size() == 1: + return input_ + # Size and dimension. 
+ last_dim = input_.dim() - 1 + rank = torch.distributed.get_rank() + world_size = torch.distributed.get_world_size() + + tensor_list = [ + torch.empty_like(input_, device=input_.device) + for _ in range(world_size) + ] + tensor_list[rank] = input_ + + torch.distributed.all_gather(tensor_list, input_) + + # Note: torch.cat already creates a contiguous tensor. + if last_dim >= 0: + output = torch.cat(tensor_list, dim=0).contiguous() + else: + output = torch.mean(torch.FloatTensor(tensor_list)) + + return output + + def _gather_all_mpu(self, input_): + group = mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if torch.distributed.get_world_size(group=group) == 1: + return input_ + # Size and dimension. + last_dim = input_.dim() - 1 + rank = torch.distributed.get_rank(group=group) + world_size = torch.distributed.get_world_size(group=group) + + tensor_list = [ + torch.empty_like(input_, device=input_.device) + for _ in range(world_size) + ] + tensor_list[rank] = input_ + torch.distributed.all_gather(tensor_list, input_, group=group) + + # Note: torch.cat already creates a contiguous tensor. + output = torch.cat(tensor_list, dim=last_dim).contiguous() + + return output + + def evaluate(self, + data_loader=None, + model=None, + forward_step_func=None, + verbose=False): + """Evaluation.""" + + # Turn off checkpoint_activations + tmp_checkpoint_activations = None + tmp_model = model + while hasattr(tmp_model, 'module'): + tmp_model = tmp_model.module + # Turn on evaluation mode which disables dropout. + tmp_model.eval() + if hasattr(tmp_model, + 'config') and 'checkpoint_activations' in tmp_model.config: + tmp_checkpoint_activations = tmp_model.config[ + 'checkpoint_activations'] + tmp_model.config['checkpoint_activations'] = False + + mems = None + metrics = [0. for _ in range(len(self.metric_methods))] + + with torch.no_grad(): + assert data_loader is not None, "val loader is not None." + all_logits = [] + all_labels = [] + all_losses = [] + for data_iterator in data_loader: + # Forward evaluation. 
+ + meta = data_iterator.get('meta', None) + + if 'deepspeed' in self.env_type or 'DDP' in self.env_type: + data_iterator = { + x: data_iterator[x].to( + torch.device('cuda', self.local_rank)) + for x in data_iterator + if x not in ['uid', 'meta', 'mode'] + } + elif torch.cuda.is_available(): + + data_iterator = { + x: + data_iterator[x].to(torch.device(self.pytorch_device)) + for x in data_iterator + if x not in ['uid', 'meta', 'mode'] + } + step_output = forward_step_func(data_iterator, model, mems) + '''when contiguous memory optimizations are enabled, the buffers + allocated by the optimizations are deallocated during backward pass + in the absence of backward pass the buffers should be reset after each + forward pass''' + if 'deepspeed' in self.env_type and self.deepspeed_activation_checkpointing: + deepspeed.checkpointing.reset() + logits = step_output['logits'] + lm_loss = step_output['loss'] + + if 'labels' in data_iterator: + labels = data_iterator['labels'] + else: + labels = data_iterator['target_ids'] + + all_logits.append(logits) + all_labels.append(labels) + all_losses.append(lm_loss.view(1)) + + if len(self.metric_methods) != 0: + all_logits = torch.cat(all_logits, dim=0) + all_labels = torch.cat(all_labels, dim=0) + + all_losses = torch.cat(all_losses, dim=0) + + if self.env_type == 'pytorchDDP' or self.env_type == 'deepspeed': + if len(self.metric_methods) != 0: + all_logits = self._gather_all(all_logits) + all_labels = self._gather_all(all_labels) + all_losses = self._gather_all(all_losses) + + elif self.env_type == 'deepspeed+mpu': + if len(self.metric_methods) != 0: + all_logits = self._gather_all_mpu(all_logits) + all_labels = self._gather_all_mpu(all_labels) + all_losses = self._gather_all_mpu(all_losses) + + if all_losses.device != torch.device('cpu'): + all_losses = all_losses.cpu().detach().numpy()[0] + + for i in range(len(self.metric_methods)): + eval_method = self.metric_methods[i][1] + metrics[i] += eval_method(all_logits, all_labels, meta=meta) + + # Move model back to the train mode. 
+ + # model.train() + tmp_model.train() + # recover the settings for checkpoint_activations + if hasattr(tmp_model, + 'config') and 'checkpoint_activations' in tmp_model.config: + tmp_model.config[ + 'checkpoint_activations'] = tmp_checkpoint_activations + metric_dct = {} + for i in range(len(self.metric_methods)): + metric_name = self.metric_methods[i][0] + metric_dct.update({metric_name: metrics[i]}) + metric_dct.update({"loss": all_losses}) + return metric_dct + + def report_iteration_metrics(self, optimizer, lr, loss, elapsed_time, step, + total_step): + log_string = ' iteration {:8d}/{:8d} |'.format(step, total_step) + log_string += ' elapsed time per iteration (ms): {:.1f} |'.format( + elapsed_time) + log_string += ' learning rate {:.3E} |'.format(lr) + log_string += ' loss {:.6E} |'.format(loss) + if self.fp16: + log_string += ' loss scale {:.1f} |'.format( + optimizer.cur_scale if 'deepspeed' in self.env_type else + hasattr(optimizer, 'loss_scale') and optimizer.loss_scale) + # log_string += ' gradient_accumulation {}/{}'.format(self.accumulate_count, self.gradient_accumulation_steps) + log_dist(log_string, [0]) + + def report_evaluate_metrics(self, prefix, loss, ppl, gpt_loss, bert_loss, + sent_loss, multi_loss, step): + string = ' validation loss at {}'.format(prefix) + string += ' | LM loss: {:.6E}'.format(loss) + string += ' | LM PPL: {:.6E}'.format(ppl) + length = len(string) + 1 + log_dist('-' * 100, [0]) + log_dist('-' * length, [0]) + log_dist(string, [0]) + log_dist('-' * length, [0]) + + def evaluate_and_print_results( + self, + prefix=None, + forward_step_func=None, + data_loader=None, + model=None, + verbose=False, + ): + """Helper function to evaluate and dump results on screen.""" + eval_dict = self.evaluate(forward_step_func=forward_step_func, + data_loader=data_loader, + model=model, + verbose=verbose) + if eval_dict.get("loss", None) is not None: + string = ' validation loss at {} | {:.4f}, '.format( + prefix, eval_dict["loss"]) + # with open("results.txt", "a") as myfile: + # myfile.write(string) + if self.metric_methods is None: + return eval_dict + + for i in range(len(self.metric_methods)): + name = self.metric_methods[i][0] + string += ", {} {:.3f}".format(name, eval_dict[name]) + # string = ' validation loss at {} | {:.4f}, Acc {:.2f}'.format( + # prefix, eval_dict["loss"], eval_dict["metrics"]) + length = len(string) + 1 + log_dist('-' * length, [0]) + log_dist(string, [0]) + log_dist('-' * length, [0]) + return eval_dict \ No newline at end of file diff --git a/flagai/trainer.py b/flagai/trainer.py index ae5aeefd..bd051a7a 100644 --- a/flagai/trainer.py +++ b/flagai/trainer.py @@ -205,9 +205,8 @@ def __init__( self.hostfile = hostfile self.training_script = training_script - training_paras = self.get_dist_args() - if 'deepspeed' in self.env_type or self.env_type == 'pytorchDDP': + training_paras = self.get_dist_args() # Implement for AutoLaunch # >>> python train.py # will call get_dist_args() # `--not_call_launch` is default 'False' @@ -1044,172 +1043,3 @@ def evaluate_and_print_results( log_dist('-' * length, [0]) return eval_dict - -class BatchTrainer(Trainer): - def __init__(self): - super(BatchTrainer, self).__init__() - - def get_dist_args(self): - parser = argparse.ArgumentParser() - parser.add_argument('--local_rank', - type=int, - default=0, - help="local_rank") - parser.add_argument('--env_type', - type=str, - required=True, - help="env_type: pytorch, pytorchDDP, deepspeed, deepspeed+mpu") - parser.add_argument('--not_call_launch', - 
action='store_true', - help="not call launch!") - parser.add_argument('--experiment_name', - type=str, - default="test", - help="experiment_name") - parser.add_argument('--batch_size', - type=int, - default=1, - help="batch size") - parser.add_argument('--gradient_accumulation_steps', - type=int, - default=1, - help="gradient_accumulation_steps") - parser.add_argument('--lr', - type=float, - default=1e-5, - help="learning rate") - parser.add_argument('--weight_decay', - type=float, - default=1e-3, - help="weight_decay") - parser.add_argument('--epochs', - type=int, - default=2, - help="epochs") - parser.add_argument('--fp16', - type=bool, - default=False, - help="fp16") - parser.add_argument('--log_interval', - type=int, - default=10, - help="log_interval") - - parser.add_argument('--eval_interval', - type=int, - default=1000, - help="eval_interval") - parser.add_argument('--load_dir', - type=str, - default=None, - help="load_dir") - parser.add_argument('--save_dir', - type=str, - default="./checkpoints", - help="save_dir") - parser.add_argument('--save_interval', - type=int, - default=1000, - help="save_interval") - parser.add_argument('--num_checkpoints', - type=int, - default=1, - help="num_checkpoints") - parser.add_argument('--pytorch_device', - type=str, - default="cpu", - help="pytorch_device") - parser.add_argument('--num_nodes', - type=int, - default=1, - help="num_nodes") - parser.add_argument('--num_gpus', - type=int, - default=1, - help="num_gpus") - parser.add_argument('--deepspeed_config', - type=str, - default="./deepspeed.json", - help="deepspeed_config") - parser.add_argument('--hostfile', - type=str, - default="hostfile", - help="hostfile") - parser.add_argument('--model_parallel_size', - type=int, - default=1, - help="model_parallel_size") - parser.add_argument('--training_script', - type=str, - default="train.py", - help="training_script") - parser.add_argument('--master_ip', - type=str, - default="127.0.0.1", - help="master_ip") - parser.add_argument('--master_port', - type=int, - default=17500, - help="master_ip") - - ds_args = parser.parse_args() - self.local_rank = ds_args.local_rank - self.not_call_launch = ds_args.not_call_launch - self.rank = int(os.environ.get('RANK', 0)) - self.world_size = int(os.environ.get('WORLD_SIZE', 1)) - self.master_addr = ds_args.master_ip - self.master_port = ds_args.master_port - self.env_type = ds_args.env_type - self.experiment_name = ds_args.experiment_name - self.batch_size = ds_args.batch_size - self.gradient_accumulation_steps = ds_args.gradient_accumulation_steps - self.lr = ds_args.lr - self.weight_decay = ds_args.weight_decay - self.epochs = ds_args.epochs - self.fp16 = ds_args.fp16 - self.log_interval = ds_args.log_interval - self.eval_interval = ds_args.eval_interval - self.load_dir = ds_args.load_dir - self.save_dir = ds_args.save_dir - self.save_interval = ds_args.save_interval - self.num_checkpoints = ds_args.num_checkpoints - self.pytorch_device = ds_args.pytorch_device - self.num_nodes = ds_args.num_nodes - self.num_gpus = ds_args.num_gpus - self.deepspeed_config = ds_args.deepspeed_config - self.hostfile = ds_args.hostfile - self.model_parallel_size = ds_args.model_parallel_size - self.training_script = ds_args.training_script - - log_dist("not_call_launch: {}".format(ds_args.not_call_launch)) - - return [ - "--env_type", - self.env_type, - "--experiment_name", - self.experiment_name, - "--batch_size", - str(self.batch_size), - "--gradient_accumulation_steps", - str(self.gradient_accumulation_steps), - "--lr", - 
str(self.lr), - "--weight_decay", - str(self.weight_decay), - "--epochs", - str(self.epochs), - "--log_interval", - str(self.log_interval), - "--eval_interval", - str(self.eval_interval), - "--load_dir", - str(self.load_dir), - "--save_dir", - str(self.save_dir), - "--save_interval", - str(self.save_interval), - "--num_checkpoints", - str(self.num_checkpoints), - "--fp16", - str(self.fp16) - ] \ No newline at end of file From dc5a84df6dcb803c58c80a57c1840e9adefd5050 Mon Sep 17 00:00:00 2001 From: Zac Liu Date: Thu, 21 Jul 2022 15:03:51 +0800 Subject: [PATCH 19/21] Create README.md fix typo in flagai introduction. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 37b40dac..614e9fb0 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ -------------------------------------------------------------------------------- -FlagAI (Fast LArge-scale General AI models) is an fast, easy-to-use and extensible toolkit for large-scale model. Our goal is to support training, fine-tuning, and deployment of large-scale models on various downstream tasks with multi-modality. Currently, we are focusing on NLP models and tasks. In near futher, we will support for other modalities. +FlagAI (Fast LArge-scale General AI models) is a fast, easy-to-use and extensible toolkit for large-scale model. Our goal is to support training, fine-tuning, and deployment of large-scale models on various downstream tasks with multi-modality. Currently, we are focusing on NLP models and tasks. In near futher, we will support for other modalities. * Now it supports **WuDao GLM** with a maximum of 10 billion parameters (see [Introduction to GLM](/docs/GLM.md)). It also supports **BERT**, **RoBERTa**, **GPT2**, **T5**, and models from Huggingface Transformers. 
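A note on the evaluation hooks touched in the trainer changes above: `metric_methods` is consumed as a list of `[name, callable]` pairs, and the examples later in this series pass callables with the signature `(logits, labels, meta=None)`. Below is a minimal sketch of that contract; the metric name and the dummy tensors are illustrative assumptions, and how the trainer invokes the callable internally is not shown in these patches.

```python
# Sketch only: the metric_methods contract consumed by Trainer.evaluate() /
# evaluate_and_print_results(). "top1_accuracy" and the tensors below are
# made-up examples; the [name, callable] pairing and the
# (logits, labels, meta=None) signature come from the patches in this series.
import torch

def top1_accuracy(logits, labels, meta=None):
    # Fraction of samples whose arg-max class matches the label.
    _, predicted = logits.max(1)
    return predicted.eq(labels).sum().item() / labels.size(0)

# Passed to Trainer.train(...); each reported value then appears alongside
# "loss" in the dictionary returned by evaluate().
metric_methods = [["top1_accuracy", top1_accuracy]]

# Quick sanity check of the callable itself:
logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
labels = torch.tensor([1, 0])
print(metric_methods[0][1](logits, labels))  # -> 1.0
```

Each metric value is merged into the result dictionary keyed by its name and printed by `evaluate_and_print_results()` next to the validation loss.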
From 437caa4e7816b461e3167f2a6981ad3f17bf40d8 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Thu, 21 Jul 2022 18:09:39 +0800 Subject: [PATCH 20/21] vit-checkpoint-activations Signed-off-by: zhaohu xing <920232796@qq.com> --- config.json | 10 +++ examples/vit_cifar100/train_DDP.py | 1 + examples/vit_cifar100/train_deepspeed.py | 1 + flagai/model/vision/helpers.py | 70 ++++++++++++++++++ flagai/model/vision/vit.py | 53 +++++++------- flagai/trainer.py | 5 +- train_deepspeed.py | 90 ++++++++++++++++++++++++ validate.py | 76 ++++++++++++++++++++ 8 files changed, 278 insertions(+), 28 deletions(-) create mode 100644 config.json create mode 100644 flagai/model/vision/helpers.py create mode 100644 train_deepspeed.py create mode 100644 validate.py diff --git a/config.json b/config.json new file mode 100644 index 00000000..27f39de5 --- /dev/null +++ b/config.json @@ -0,0 +1,10 @@ +{ + "img_size": 224, + "patch_size": 16, + "in_chans": 3, + "embed_dim": 768, + "depth": 12, + "num_heads": 12, + "num_classes": 100, + "checkpoint_activations": false +} \ No newline at end of file diff --git a/examples/vit_cifar100/train_DDP.py b/examples/vit_cifar100/train_DDP.py index 06f5cd1a..35c997b1 100644 --- a/examples/vit_cifar100/train_DDP.py +++ b/examples/vit_cifar100/train_DDP.py @@ -28,6 +28,7 @@ save_interval=1000, num_checkpoints=1, hostfile="./hostfile", + training_script="train_DDP.py" ) def build_cifar(): diff --git a/examples/vit_cifar100/train_deepspeed.py b/examples/vit_cifar100/train_deepspeed.py index 27d46628..9d44b1df 100644 --- a/examples/vit_cifar100/train_deepspeed.py +++ b/examples/vit_cifar100/train_deepspeed.py @@ -29,6 +29,7 @@ save_interval=1000, num_checkpoints=1, hostfile="./hostfile", + training_script="train_deepspeed.py" ) def build_cifar(): diff --git a/flagai/model/vision/helpers.py b/flagai/model/vision/helpers.py new file mode 100644 index 00000000..1e56190d --- /dev/null +++ b/flagai/model/vision/helpers.py @@ -0,0 +1,70 @@ + +import os +if os.getenv('ENV_TYPE') == 'deepspeed': + from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint +else: + from torch.utils.checkpoint import checkpoint +import torch +from itertools import chain + +def checkpoint_seq( + functions, + x, + every=1, + flatten=False, + skip_last=False, +): + r"""A helper function for checkpointing sequential models. + Sequential models execute a list of modules/functions in order + (sequentially). Therefore, we can divide such a sequence into segments + and checkpoint each segment. All segments except run in :func:`torch.no_grad` + manner, i.e., not storing the intermediate activations. The inputs of each + checkpointed segment will be saved for re-running the segment in the backward pass. + See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works. + .. warning:: + Checkpointing currently only supports :func:`torch.autograd.backward` + and only if its `inputs` argument is not passed. :func:`torch.autograd.grad` + is not supported. + .. warning: + At least one of the inputs needs to have :code:`requires_grad=True` if + grads are needed for model inputs, otherwise the checkpointed part of the + model won't have gradients. + Args: + functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially. 
+ x: A Tensor that is input to :attr:`functions` + every: checkpoint every-n functions (default: 1) + flatten (bool): flatten nn.Sequential of nn.Sequentials + skip_last (bool): skip checkpointing the last function in the sequence if True + preserve_rng_state (bool, optional, default=True): Omit stashing and restoring + the RNG state during each checkpoint. + Returns: + Output of running :attr:`functions` sequentially on :attr:`*inputs` + Example: + >>> model = nn.Sequential(...) + >>> input_var = checkpoint_seq(model, input_var, every=2) + """ + def run_function(start, end, functions): + def forward(_x): + for j in range(start, end + 1): + _x = functions[j](_x) + return _x + return forward + + if isinstance(functions, torch.nn.Sequential): + functions = functions.children() + if flatten: + functions = chain.from_iterable(functions) + if not isinstance(functions, (tuple, list)): + functions = tuple(functions) + + num_checkpointed = len(functions) + if skip_last: + num_checkpointed -= 1 + end = -1 + for start in range(0, num_checkpointed, every): + end = min(start + every - 1, num_checkpointed - 1) + x = checkpoint(run_function(start, end, functions), x) + if skip_last: + return run_function(end + 1, len(functions) - 1, functions)(x) + return x + diff --git a/flagai/model/vision/vit.py b/flagai/model/vision/vit.py index 44479a1e..6fc93865 100644 --- a/flagai/model/vision/vit.py +++ b/flagai/model/vision/vit.py @@ -33,6 +33,7 @@ from flagai.model.vision.layers.drop import DropPath from flagai.model.vision.layers.weight_init import trunc_normal_, lecun_normal_ from flagai.model.base_model import BaseModel +from flagai.model.vision.helpers import checkpoint_seq class VitConfig: def __init__(self, @@ -53,7 +54,7 @@ def __init__(self, attn_drop_rate=0., drop_path_rate=0., weight_init='', - checkpoint_activations=None): + checkpoint_activations=False): pass self.img_size=img_size self.patch_size=patch_size @@ -74,7 +75,6 @@ def __init__(self, self.weight_init=weight_init self.checkpoint_activations = checkpoint_activations - def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: if not depth_first and include_root: fn(module=module, name=name) @@ -206,42 +206,42 @@ def __init__( block_fn=Block vit_config = VitConfig(**config) vit_config.num_classes = num_classes - config = vit_config + # config = vit_config - assert config.global_pool in ('', 'avg', 'token') - assert config.class_token or config.global_pool != 'token' - use_fc_norm = config.global_pool == 'avg' if config.fc_norm is None else config.fc_norm + assert vit_config.global_pool in ('', 'avg', 'token') + assert vit_config.class_token or vit_config.global_pool != 'token' + use_fc_norm = vit_config.global_pool == 'avg' if vit_config.fc_norm is None else vit_config.fc_norm norm_layer = partial(nn.LayerNorm, eps=1e-6) act_layer = nn.GELU self.num_classes = num_classes - self.global_pool = config.global_pool - self.num_features = self.embed_dim = config.embed_dim # num_features for consistency with other models - self.num_tokens = 1 if config.class_token else 0 - self.grad_checkpointing = False + self.global_pool = vit_config.global_pool + self.num_features = self.embed_dim = vit_config.embed_dim # num_features for consistency with other models + self.num_tokens = 1 if vit_config.class_token else 0 + self.grad_checkpointing = vit_config.checkpoint_activations self.patch_embed = embed_layer( - img_size=config.img_size, patch_size=config.patch_size, in_chans=config.in_chans, 
embed_dim=config.embed_dim) + img_size=vit_config.img_size, patch_size=vit_config.patch_size, in_chans=vit_config.in_chans, embed_dim=vit_config.embed_dim) num_patches = self.patch_embed.num_patches - self.cls_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if self.num_tokens > 0 else None - self.pos_embed = nn.Parameter(torch.randn(1, num_patches + self.num_tokens, config.embed_dim) * .02) - self.pos_drop = nn.Dropout(p=config.drop_rate) + self.cls_token = nn.Parameter(torch.zeros(1, 1, vit_config.embed_dim)) if self.num_tokens > 0 else None + self.pos_embed = nn.Parameter(torch.randn(1, num_patches + self.num_tokens, vit_config.embed_dim) * .02) + self.pos_drop = nn.Dropout(p=vit_config.drop_rate) - dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.depth)] # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, vit_config.drop_path_rate, vit_config.depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ block_fn( - dim=config.embed_dim, num_heads=config.num_heads, mlp_ratio=config.mlp_ratio, qkv_bias=config.qkv_bias, init_values=config.init_values, - drop=config.drop_rate, attn_drop=config.attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) - for i in range(config.depth)]) - self.norm = norm_layer(config.embed_dim) if not use_fc_norm else nn.Identity() + dim=vit_config.embed_dim, num_heads=vit_config.num_heads, mlp_ratio=vit_config.mlp_ratio, qkv_bias=vit_config.qkv_bias, init_values=vit_config.init_values, + drop=vit_config.drop_rate, attn_drop=vit_config.attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(vit_config.depth)]) + self.norm = norm_layer(vit_config.embed_dim) if not use_fc_norm else nn.Identity() # Classifier Head - self.fc_norm = norm_layer(config.embed_dim) if use_fc_norm else nn.Identity() + self.fc_norm = norm_layer(vit_config.embed_dim) if use_fc_norm else nn.Identity() self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() - if config.weight_init != 'skip': - self.init_weights(config.weight_init) + if vit_config.weight_init != 'skip': + self.init_weights(vit_config.weight_init) def init_weights(self, mode=''): assert mode in ('jax', 'jax_nlhb', 'moco', '') @@ -290,10 +290,11 @@ def forward_features(self, x): if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) x = self.pos_drop(x + self.pos_embed) - # if self.grad_checkpointing and not torch.jit.is_scripting(): - # x = checkpoint_seq(self.blocks, x) - # else: - x = self.blocks(x) + + if self.config["checkpoint_activations"]: + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) x = self.norm(x) return x diff --git a/flagai/trainer.py b/flagai/trainer.py index a64e8f3f..f2fd8dd1 100644 --- a/flagai/trainer.py +++ b/flagai/trainer.py @@ -348,7 +348,8 @@ def train(self, train_dataset=None, valid_dataset=None, metric_methods=[], - collate_fn=None): + collate_fn=None, + find_unused_parameters=True): """Training Loops""" """ Trainer is a simple but unifed training and eval loop for PyTorch/Deepspeed/Megatron-LM. 
@@ -416,7 +417,7 @@ def train(self, model.to(torch.device('cuda', self.local_rank)) model = DDP(model, device_ids=[self.local_rank], - find_unused_parameters=True) + find_unused_parameters=find_unused_parameters) elif self.env_type == 'pytorch': model.to(self.pytorch_device) diff --git a/train_deepspeed.py b/train_deepspeed.py new file mode 100644 index 00000000..f4b39fd3 --- /dev/null +++ b/train_deepspeed.py @@ -0,0 +1,90 @@ +import torch +from torchvision import transforms +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.trainer import Trainer +from flagai.auto_model.auto_loader import AutoLoader + +lr = 2e-5 +n_epochs = 50 +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +env_type = "pytorchDDP" +trainer = Trainer( + env_type=env_type, + experiment_name="vit-cifar100-deepspeed", + batch_size=128, + num_gpus=2, + fp16=True, + gradient_accumulation_steps=1, + lr=lr, + weight_decay=1e-5, + epochs=n_epochs, + log_interval=10, + eval_interval=100, + load_dir=None, + pytorch_device=device, + save_dir="checkpoints_vit_cifar100_deepspeed", + save_interval=1000, + num_checkpoints=1, + hostfile="./hostfile", + deepspeed_config="./deepspeed.json", + training_script="train_deepspeed.py", + checkpoint_activations=True, +) + +def build_cifar(): + transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.Resize(224), + transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) + test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) + return train_dataset, test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + if trainer.fp16: + images = images.half() + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + # optimizer = torch.optim.Adam(model.parameters(), lr=lr) + # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) + train_dataset, val_dataset = build_cifar() + + trainer.train(model, + # optimizer=optimizer, + # lr_scheduler=scheduler, + train_dataset=train_dataset, + valid_dataset=val_dataset, + metric_methods=[["accuracy", validate]], + collate_fn=collate_fn) + + + + + diff --git a/validate.py b/validate.py new file mode 100644 index 00000000..2e3fbc3c --- /dev/null +++ b/validate.py @@ -0,0 +1,76 @@ +import torch +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR100 +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +from flagai.auto_model.auto_loader import AutoLoader +import os +from tqdm import tqdm + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +def 
build_cifar(): + + transform_test = transforms.Compose([ + transforms.Resize(224), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), + ]) + + test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) + return test_dataset + +def collate_fn(batch): + images = torch.stack([b[0] for b in batch]) + labels = [b[1] for b in batch] + labels = torch.tensor(labels).long() + return {"images": images, "labels": labels} + +def validate(logits, labels, meta=None): + _, predicted = logits.max(1) + total = labels.size(0) + correct = predicted.eq(labels).sum().item() + return correct / total + +if __name__ == '__main__': + + model_save_dir = "./checkpoints_vit_cifar100" + print(f"loadding model in :{model_save_dir}") + loader = AutoLoader(task_name="classification", + model_name="vit-base-p16-224", + num_classes=100) + + model = loader.get_model() + + model.load_state_dict(torch.load(os.path.join(model_save_dir, "38000", "pytorch_model.bin"), map_location=device)["module"]) + print(f"model load success.......") + model.to(device) + + val_dataset = build_cifar() + + val_dataloader = DataLoader(val_dataset, + batch_size=1, + shuffle=False, + collate_fn=collate_fn) + index = 0 + accuracy = 0.0 + for data in tqdm(val_dataloader, total=len(val_dataloader)): + index += 1 + data = {k: v.to(device) for k, v in data.items()} + labels = data["labels"] + pred = model(**data)["logits"] + acc = validate(pred, labels) + accuracy += acc + + print(f"accuracy is {accuracy / index}") + + + + + + + + + + From dc6fc3dc3bb701175455fceaf3119b2666155616 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Thu, 21 Jul 2022 18:10:06 +0800 Subject: [PATCH 21/21] vit-checkpoint-activations Signed-off-by: zhaohu xing <920232796@qq.com> --- config.json | 10 ------ train_deepspeed.py | 90 ---------------------------------------------- validate.py | 76 --------------------------------------- 3 files changed, 176 deletions(-) delete mode 100644 config.json delete mode 100644 train_deepspeed.py delete mode 100644 validate.py diff --git a/config.json b/config.json deleted file mode 100644 index 27f39de5..00000000 --- a/config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "img_size": 224, - "patch_size": 16, - "in_chans": 3, - "embed_dim": 768, - "depth": 12, - "num_heads": 12, - "num_classes": 100, - "checkpoint_activations": false -} \ No newline at end of file diff --git a/train_deepspeed.py b/train_deepspeed.py deleted file mode 100644 index f4b39fd3..00000000 --- a/train_deepspeed.py +++ /dev/null @@ -1,90 +0,0 @@ -import torch -from torchvision import transforms -from torchvision.datasets import CIFAR100 -import ssl -ssl._create_default_https_context = ssl._create_unverified_context -from flagai.trainer import Trainer -from flagai.auto_model.auto_loader import AutoLoader - -lr = 2e-5 -n_epochs = 50 -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -env_type = "pytorchDDP" -trainer = Trainer( - env_type=env_type, - experiment_name="vit-cifar100-deepspeed", - batch_size=128, - num_gpus=2, - fp16=True, - gradient_accumulation_steps=1, - lr=lr, - weight_decay=1e-5, - epochs=n_epochs, - log_interval=10, - eval_interval=100, - load_dir=None, - pytorch_device=device, - save_dir="checkpoints_vit_cifar100_deepspeed", - save_interval=1000, - num_checkpoints=1, - hostfile="./hostfile", - deepspeed_config="./deepspeed.json", - training_script="train_deepspeed.py", - checkpoint_activations=True, -) - -def build_cifar(): - 
transform_train = transforms.Compose([ - transforms.RandomCrop(32, padding=4), - transforms.Resize(224), - transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10), - transforms.ToTensor(), - transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), - ]) - transform_test = transforms.Compose([ - transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), - ]) - - train_dataset = CIFAR100(root="./data/cifar100", train=True, download=True, transform=transform_train) - test_dataset = CIFAR100(root="./data/cifar100", train=False, download=True, transform=transform_test) - return train_dataset, test_dataset - -def collate_fn(batch): - images = torch.stack([b[0] for b in batch]) - if trainer.fp16: - images = images.half() - labels = [b[1] for b in batch] - labels = torch.tensor(labels).long() - return {"images": images, "labels": labels} - -def validate(logits, labels, meta=None): - _, predicted = logits.max(1) - total = labels.size(0) - correct = predicted.eq(labels).sum().item() - return correct / total - -if __name__ == '__main__': - loader = AutoLoader(task_name="classification", - model_name="vit-base-p16-224", - num_classes=100) - - model = loader.get_model() - # optimizer = torch.optim.Adam(model.parameters(), lr=lr) - # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs) - train_dataset, val_dataset = build_cifar() - - trainer.train(model, - # optimizer=optimizer, - # lr_scheduler=scheduler, - train_dataset=train_dataset, - valid_dataset=val_dataset, - metric_methods=[["accuracy", validate]], - collate_fn=collate_fn) - - - - - diff --git a/validate.py b/validate.py deleted file mode 100644 index 2e3fbc3c..00000000 --- a/validate.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -from torchvision import transforms -from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR100 -import ssl -ssl._create_default_https_context = ssl._create_unverified_context -from flagai.auto_model.auto_loader import AutoLoader -import os -from tqdm import tqdm - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -def build_cifar(): - - transform_test = transforms.Compose([ - transforms.Resize(224), - transforms.ToTensor(), - transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), - ]) - - test_dataset = CIFAR100(root="./cifar100", train=False, download=True, transform=transform_test) - return test_dataset - -def collate_fn(batch): - images = torch.stack([b[0] for b in batch]) - labels = [b[1] for b in batch] - labels = torch.tensor(labels).long() - return {"images": images, "labels": labels} - -def validate(logits, labels, meta=None): - _, predicted = logits.max(1) - total = labels.size(0) - correct = predicted.eq(labels).sum().item() - return correct / total - -if __name__ == '__main__': - - model_save_dir = "./checkpoints_vit_cifar100" - print(f"loadding model in :{model_save_dir}") - loader = AutoLoader(task_name="classification", - model_name="vit-base-p16-224", - num_classes=100) - - model = loader.get_model() - - model.load_state_dict(torch.load(os.path.join(model_save_dir, "38000", "pytorch_model.bin"), map_location=device)["module"]) - print(f"model load success.......") - model.to(device) - - val_dataset = build_cifar() - - val_dataloader = DataLoader(val_dataset, - batch_size=1, - shuffle=False, - collate_fn=collate_fn) - index = 0 - accuracy = 0.0 - for data in tqdm(val_dataloader, total=len(val_dataloader)): - index += 1 - 
data = {k: v.to(device) for k, v in data.items()} - labels = data["labels"] - pred = model(**data)["logits"] - acc = validate(pred, labels) - accuracy += acc - - print(f"accuracy is {accuracy / index}") - - - - - - - - - -
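Patches 20 and 21 together add activation checkpointing to the ViT: when `checkpoint_activations` is true in the model config, `forward_features()` runs the transformer blocks through the new `checkpoint_seq` helper instead of calling `self.blocks(x)` directly, so activations inside each segment are recomputed during backward rather than stored. The following is a minimal, self-contained sketch of how that helper behaves; the 12-block toy stack and tensor shapes are assumptions made for illustration, and with `ENV_TYPE` unset it falls back to `torch.utils.checkpoint.checkpoint`, as in helpers.py.

```python
# Sketch only: exercising the checkpoint_seq helper added in PATCH 20
# (flagai/model/vision/helpers.py). The toy block stack and shapes are
# illustrative; the ViT applies the same call to its transformer blocks when
# config["checkpoint_activations"] is true.
import torch
import torch.nn as nn
from flagai.model.vision.helpers import checkpoint_seq

blocks = nn.Sequential(
    *[nn.Sequential(nn.Linear(64, 64), nn.GELU()) for _ in range(12)]
)
x = torch.randn(8, 64, requires_grad=True)

# Plain forward: activations of all 12 blocks are kept for the backward pass.
y_plain = blocks(x)

# Checkpointed forward: blocks run in segments of `every` modules; only the
# segment inputs are stored, and each segment is re-executed during backward,
# trading extra compute for lower activation memory.
y_ckpt = checkpoint_seq(blocks, x, every=2)
y_ckpt.sum().backward()

# The two forwards are numerically identical; only memory behaviour differs.
print(torch.allclose(y_plain, y_ckpt))
```

When `ENV_TYPE` is set to `deepspeed`, helpers.py imports `checkpoint` from DeepSpeed's activation-checkpointing module instead, so the same call transparently uses DeepSpeed's implementation.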