From 17de42d1022639e79f6d1968aeac9bc45bb4ebf3 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 1 Aug 2022 11:36:52 -0700 Subject: [PATCH 01/34] [WIP] Skeleton of VisualQuestionAnweringPipeline extended to support LayoutLM-like models --- src/transformers/models/auto/modeling_auto.py | 7 + src/transformers/pipelines/base.py | 4 +- .../pipelines/question_answering.py | 2 +- .../pipelines/visual_question_answering.py | 188 +++++++++++++++--- 4 files changed, 171 insertions(+), 30 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 5060b535b05da4..e84585f696e465 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -562,6 +562,10 @@ ("funnel", "FunnelForQuestionAnswering"), ("gptj", "GPTJForQuestionAnswering"), ("ibert", "IBertForQuestionAnswering"), + # TODO: Do the LayoutLM classes belong here, or should they only be in VisualQuestionAnsweringPipeline? + # Because the bounding boxes are optional inputs to the model, you technically _can_ execute these models + # through the question answering pipeline (because its output shape matches), but I'm not sure if models + # should belong in multiple mappings. ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ("led", "LEDForQuestionAnswering"), @@ -600,6 +604,9 @@ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ ("vilt", "ViltForQuestionAnswering"), + ("layoutlm", "LayoutLMForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ] ) diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index 7842b95b32859c..b5e7c9cb58ce08 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -178,7 +178,7 @@ def infer_framework_load_model( model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, - **model_kwargs + **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). @@ -274,7 +274,7 @@ def infer_framework_from_model( model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, - **model_kwargs + **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). 
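For context, a minimal sketch of what the mapping additions above enable once applied, assuming a LayoutLMv3 checkpoint is available (the checkpoint name below is only an example): the auto classes now resolve LayoutLM-family configs to their extractive question-answering heads, which is why the extended pipeline's postprocessing has to handle start/end logits in addition to ViLT-style classification logits.

from transformers import AutoConfig, AutoModelForVisualQuestionAnswering

# Example checkpoint name, used here only to obtain a LayoutLMv3 config; no weights are needed for from_config().
config = AutoConfig.from_pretrained("microsoft/layoutlmv3-base")
model = AutoModelForVisualQuestionAnswering.from_config(config)
print(type(model).__name__)  # LayoutLMv3ForQuestionAnswering, a span (start/end) head rather than a classifier head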
diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 6f07382dc57c6b..918931a2cac47e 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -141,7 +141,7 @@ def __init__( framework: Optional[str] = None, device: int = -1, task: str = "", - **kwargs + **kwargs, ): super().__init__( model=model, diff --git a/src/transformers/pipelines/visual_question_answering.py b/src/transformers/pipelines/visual_question_answering.py index 34a7a3b10d40fe..9ed2353aaf1529 100644 --- a/src/transformers/pipelines/visual_question_answering.py +++ b/src/transformers/pipelines/visual_question_answering.py @@ -1,6 +1,7 @@ -from typing import Union +from typing import List, Optional, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging +from ..modeling_outputs import SequenceClassifierOutput, QuestionAnsweringModelOutput from .base import PIPELINE_INIT_ARGS, Pipeline @@ -10,11 +11,51 @@ from ..image_utils import load_image if is_torch_available(): + import torch + from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING logger = logging.get_logger(__name__) +def postprocess_sequence_output(model, model_outputs, framework, top_k): + if top_k > model.config.num_labels: + top_k = model.config.num_labels + + if framework == "pt": + probs = model_outputs.logits.sigmoid()[0] + scores, ids = probs.topk(top_k) + else: + raise ValueError(f"Unsupported framework: {framework}") + + scores = scores.tolist() + ids = ids.tolist() + return [{"score": score, "answer": model.config.id2label[_id]} for score, _id in zip(scores, ids)] + + +def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_k): + # TODO: Test to make sure this works with tensorflow too (or break on the framework) + + # TODO: This is a very poor implementation of start/end (just here for completeness sake). + # Ideally we can refactor/borrow the implementation in the question answering pipeline. 
+ results = [] + for i, (s, e) in enumerate(zip(model_outputs.start_logits.argmax(-1), model_outputs.end_logits.argmax(-1))): + if s > e: + continue + else: + word_start, word_end = word_ids[i][s], word_ids[i][e] + results.append( + { + "score": 0.5, # TODO + "answer": " ".join(words[word_start : word_end + 1]), + "start": word_start, + "end": word_end, + } + ) + + return results + + @add_end_docstrings(PIPELINE_INIT_ARGS) class VisualQuestionAnsweringPipeline(Pipeline): """ @@ -33,7 +74,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING) - def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs): + def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, words=None, boxes=None, **kwargs): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding @@ -41,9 +82,20 @@ def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwar preprocess_params["truncation"] = truncation if top_k is not None: postprocess_params["top_k"] = top_k + if words is not None or boxes is not None: + if words is None or boxes is None: + raise ValueError("Must provide both words and boxes if providing either") + preprocess_params["words"] = words + preprocess_params["boxes"] = boxes + return preprocess_params, {}, postprocess_params - def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs): + def __call__( + self, + image: [Optional[Union["Image.Image", str]]] = None, + question: Optional[Union[List[str], str]] = None, + **kwargs, + ): r""" Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below: @@ -74,8 +126,10 @@ def __call__(self, image: Union["Image.Image", str], question: str = None, **kwa - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. """ - if isinstance(image, (Image.Image, str)) and isinstance(question, str): - inputs = {"image": image, "question": question} + if (isinstance(image, (Image.Image, str)) or image is None) and isinstance(question, str): + inputs = {"question": question} + if image is not None: + inputs["image"] = image else: """ Supports the following format @@ -87,29 +141,109 @@ def __call__(self, image: Union["Image.Image", str], question: str = None, **kwa results = super().__call__(inputs, **kwargs) return results - def preprocess(self, inputs, padding=False, truncation=False): - image = load_image(inputs["image"]) - model_inputs = self.tokenizer( - inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation + def preprocess(self, inputs, padding=False, truncation=False, words=[], boxes=[]): + image_features = {} + if "image" in inputs: + image = load_image(inputs["image"]) + image_features = self.feature_extractor(images=image, return_tensors=self.framework) + + # TODO: If the image is specified, depending on the tokenizer, we should update inputs + # with the words/bounding boxes. + # + # TODO: LayoutLMv1 does not come with a feature extractor that can run OCR, but v2 and v3 do. + # We could either (a) add a feature extractor for v1 that does OCR (and is a no-op otherwise) + # or build the OCR implementation into this pipeline (and expect users of v2 and v3 to + # instantiate the pipeline with feature extractors that have OCR disabled. 
+ + if not ("image" in inputs or "words" in inputs): + raise ValueError("Must provide at least one of an image or words/bounding boxes") + + question = inputs["question"] + text_pair = None + + extra_tokenizer_params = {} + if "words" in inputs: + # TODO: Can we refactor and share (or inherit from) the QuestionAnweringPipeline? I think we want to do + # the same thing with the tokenizer, except we have a few extra arguments (e.g. the bounding boxes). + extra_tokenizer_params = { + "return_token_type_ids": True, + "return_attention_mask": True, + "is_split_into_words": True, + } + padding = "max_length" + truncation = True + question = [question] + text_pair = inputs["words"] + + encoding = self.tokenizer( + text=question, + text_pair=text_pair, + return_tensors=self.framework, + padding=padding, + truncation=truncation, + **extra_tokenizer_params, ) - image_features = self.feature_extractor(images=image, return_tensors=self.framework) - model_inputs.update(image_features) - return model_inputs - def _forward(self, model_inputs): + num_spans = len(encoding["input_ids"]) + + if "boxes" in inputs: + boxes = inputs["boxes"] + bbox = [] + for batch_index in range(num_spans): + for i, s, w in zip( + encoding.input_ids[batch_index], + encoding.sequence_ids(batch_index), + encoding.word_ids(batch_index), + ): + if s == 1: + bbox.append(boxes[w]) + elif i == self.tokenizer.sep_token_id: + bbox.append([1000] * 4) + else: + bbox.append([0] * 4) + + if self.framework == "tf": + # TODO implement + pass + elif self.framework == "pt": + encoding["bbox"] = torch.tensor([bbox]) + + # TODO: Handle multiple spans. We'll basically want to duplicate the image features for each span + # (we can then also remove this assert) + assert len(image_features) == 0 or list(image_features.items())[0].size(0) == list(encoding.items())[0].size(0) + encoding.update(image_features) + + # TODO: I think it's cleaner to place the encoding and other context in the dict in separate keys + # instead of flat at the top level. I'm happy to undo this though if it's not in line with + # other parts of the code. + return { + "encoding": encoding, + "word_ids": [encoding.word_ids(i) for i in range(len(encoding["input_ids"]))], + "sequence_ids": [encoding.sequence_ids(i) for i in range(len(encoding["input_ids"]))], + "inputs": inputs, + } + + def _forward(self, inputs): + model_inputs = {k: inputs["encoding"][k] for k in self.tokenizer.model_input_names} model_outputs = self.model(**model_inputs) - return model_outputs - - def postprocess(self, model_outputs, top_k=5): - if top_k > self.model.config.num_labels: - top_k = self.model.config.num_labels - - if self.framework == "pt": - probs = model_outputs.logits.sigmoid()[0] - scores, ids = probs.topk(top_k) + return {"outputs": model_outputs, "inputs": inputs} + + def postprocess(self, result, top_k=5): + model_outputs = result["outputs"] + + # TODO: Is there a better way to do this? 
I tried using + # isinstance(model_outputs, SequenceClassifierOutput) but that thinks model_outputs + # is transformers.utils.generic.ModelOutput + if "logits" in model_outputs: + return postprocess_sequence_output(self.model, model_outputs, self.framework, top_k) + elif "start_logits" in model_outputs and "end_logits" in model_outputs: + return postprocess_qa_output( + self.model, + model_outputs, + result["inputs"]["word_ids"], + result["inputs"]["inputs"]["words"], + self.framework, + top_k, + ) else: - raise ValueError(f"Unsupported framework: {self.framework}") - - scores = scores.tolist() - ids = ids.tolist() - return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] + assert False, "Unknown output format" From 80579000dec491632fc9c0c143626e1a4bb1e44e Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 1 Aug 2022 22:28:44 -0700 Subject: [PATCH 02/34] Fixup --- src/transformers/models/auto/modeling_auto.py | 10 +++++----- .../pipelines/visual_question_answering.py | 7 +++---- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index e84585f696e465..42d538a2dc5c00 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -541,6 +541,10 @@ ] ) +# TODO: Do the LayoutLM classes belong here, or should they only be in VisualQuestionAnsweringPipeline? +# Because the bounding boxes are optional inputs to the model, you technically _can_ execute these models +# through the question answering pipeline (because its output shape matches), but I'm not sure if models +# should belong in multiple mappings. MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ # Model for Question Answering mapping @@ -562,10 +566,6 @@ ("funnel", "FunnelForQuestionAnswering"), ("gptj", "GPTJForQuestionAnswering"), ("ibert", "IBertForQuestionAnswering"), - # TODO: Do the LayoutLM classes belong here, or should they only be in VisualQuestionAnsweringPipeline? - # Because the bounding boxes are optional inputs to the model, you technically _can_ execute these models - # through the question answering pipeline (because its output shape matches), but I'm not sure if models - # should belong in multiple mappings. ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ("led", "LEDForQuestionAnswering"), @@ -603,10 +603,10 @@ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ - ("vilt", "ViltForQuestionAnswering"), ("layoutlm", "LayoutLMForQuestionAnswering"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ("vilt", "ViltForQuestionAnswering"), ] ) diff --git a/src/transformers/pipelines/visual_question_answering.py b/src/transformers/pipelines/visual_question_answering.py index 9ed2353aaf1529..b061ba1095e006 100644 --- a/src/transformers/pipelines/visual_question_answering.py +++ b/src/transformers/pipelines/visual_question_answering.py @@ -1,7 +1,6 @@ from typing import List, Optional, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging -from ..modeling_outputs import SequenceClassifierOutput, QuestionAnsweringModelOutput from .base import PIPELINE_INIT_ARGS, Pipeline @@ -151,9 +150,9 @@ def preprocess(self, inputs, padding=False, truncation=False, words=[], boxes=[] # with the words/bounding boxes. 
# # TODO: LayoutLMv1 does not come with a feature extractor that can run OCR, but v2 and v3 do. - # We could either (a) add a feature extractor for v1 that does OCR (and is a no-op otherwise) - # or build the OCR implementation into this pipeline (and expect users of v2 and v3 to - # instantiate the pipeline with feature extractors that have OCR disabled. + # We could either (a) require users of v1 pass in words (or implement a v1 feature extractor) + # or (b) build the OCR implementation into this pipeline (and expect users of v2 and v3 to + # instantiate the pipeline with feature extractors that have OCR disabled). if not ("image" in inputs or "words" in inputs): raise ValueError("Must provide at least one of an image or words/bounding boxes") From 652d140447bbf349c159663ab48aa8883245b139 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 1 Aug 2022 23:02:28 -0700 Subject: [PATCH 03/34] Use the full encoding --- src/transformers/pipelines/visual_question_answering.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/pipelines/visual_question_answering.py b/src/transformers/pipelines/visual_question_answering.py index b061ba1095e006..6987db8706241d 100644 --- a/src/transformers/pipelines/visual_question_answering.py +++ b/src/transformers/pipelines/visual_question_answering.py @@ -168,6 +168,7 @@ def preprocess(self, inputs, padding=False, truncation=False, words=[], boxes=[] "return_token_type_ids": True, "return_attention_mask": True, "is_split_into_words": True, + # "max_length": 512, } padding = "max_length" truncation = True @@ -223,8 +224,7 @@ def preprocess(self, inputs, padding=False, truncation=False, words=[], boxes=[] } def _forward(self, inputs): - model_inputs = {k: inputs["encoding"][k] for k in self.tokenizer.model_input_names} - model_outputs = self.model(**model_inputs) + model_outputs = self.model(**inputs["encoding"]) return {"outputs": model_outputs, "inputs": inputs} def postprocess(self, result, top_k=5): From 5fb7de5c97115635b4e287b2bacdbb6da34bd196 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 3 Aug 2022 22:53:19 -0700 Subject: [PATCH 04/34] Basic refactoring to DocumentQuestionAnsweringPipeline --- src/transformers/models/auto/modeling_auto.py | 21 +- src/transformers/pipelines/__init__.py | 13 +- .../pipelines/document_question_answering.py | 181 +++++++++++++++++ .../pipelines/visual_question_answering.py | 189 +++--------------- .../models/layoutlm/test_modeling_layoutlm.py | 1 + 5 files changed, 242 insertions(+), 163 deletions(-) create mode 100644 src/transformers/pipelines/document_question_answering.py diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 42d538a2dc5c00..804478efcea40a 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -602,11 +602,16 @@ ) MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("vilt", "ViltForQuestionAnswering"), + ] +) + +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ ("layoutlm", "LayoutLMForQuestionAnswering"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), - ("vilt", "ViltForQuestionAnswering"), ] ) @@ -780,6 +785,9 @@ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES ) +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, 
MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES) MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES @@ -898,6 +906,17 @@ class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass): ) +class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +AutoModelForDocumentQuestionAnswering = auto_class_update( + AutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example="impira/layoutlm-doc-qa", +) + + class AutoModelForTokenClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index ee7dee57c0e9d2..f521a054221443 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -69,6 +69,7 @@ TokenClassificationPipeline, ) from .visual_question_answering import VisualQuestionAnsweringPipeline +from .document_question_answering import DocumentQuestionAnsweringPipeline from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline from .zero_shot_image_classification import ZeroShotImageClassificationPipeline @@ -122,6 +123,7 @@ AutoModelForTokenClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, + AutoModelForDocumentQuestionAnswering, ) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel @@ -215,6 +217,15 @@ }, "type": "multimodal", }, + "document-question-answering": { + "impl": DocumentQuestionAnsweringPipeline, + "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), + "tf": (), + "default": { + "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")}, # TODO + }, + "type": "multimodal", + }, "fill-mask": { "impl": FillMaskPipeline, "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (), @@ -443,7 +454,7 @@ def pipeline( trust_remote_code: Optional[bool] = None, model_kwargs: Dict[str, Any] = None, pipeline_class: Optional[Any] = None, - **kwargs + **kwargs, ) -> Pipeline: """ Utility factory method to build a [`Pipeline`]. diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py new file mode 100644 index 00000000000000..c2d5b3c5778fda --- /dev/null +++ b/src/transformers/pipelines/document_question_answering.py @@ -0,0 +1,181 @@ +from typing import List, Optional, Tuple, Union + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging +from .base import PIPELINE_INIT_ARGS, Pipeline + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +logger = logging.get_logger(__name__) + +# TODO: +# 1. Should we make this a chunk pipeline for consistency with QAPipeline? +# 2. Should we switch padding default to "do_not_pad" and do the same "unsqueeze" trick as the qa pipeline? 
+ + +def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_k): + # TODO: Test to make sure this works with tensorflow too (or break on the framework) + + # TODO: This is a very poor implementation of start/end (just here for completeness sake). + # Ideally we can refactor/borrow the implementation in the question answering pipeline. + results = [] + for i, (s, e) in enumerate(zip(model_outputs.start_logits.argmax(-1), model_outputs.end_logits.argmax(-1))): + if s > e: + continue + else: + word_start, word_end = word_ids[i][s], word_ids[i][e] + results.append( + { + "score": 0.5, # TODO + "answer": " ".join(words[word_start : word_end + 1]), + "start": word_start, + "end": word_end, + } + ) + + return results + + +@add_end_docstrings(PIPELINE_INIT_ARGS) +class DocumentQuestionAnsweringPipeline(Pipeline): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) + + # TODO: Borrow params from QA pipeline probably + def _sanitize_parameters( + self, + padding=None, + word_boxes=None, # TODO: Maybe rename to just words + doc_stride=None, + max_question_len=None, + max_seq_len=None, + top_k=None, + **kwargs, + ): + preprocess_params, postprocess_params = {}, {} + if padding is not None: + preprocess_params["padding"] = padding + if doc_stride is not None: + preprocess_params["doc_stride"] = doc_stride + if max_question_len is not None: + preprocess_params["max_question_len"] = max_question_len + if max_seq_len is not None: + preprocess_params["max_seq_len"] = max_seq_len + + if top_k is not None: + postprocess_params["top_k"] = top_k + + return preprocess_params, {}, postprocess_params + + # TODO: Borrow params from QA pipeline probably + def __call__( + self, + image: Union["Image.Image", str], + question: Optional[str] = None, + word_boxes: Tuple[str, List[float]] = None, + **kwargs, + ): + if isinstance(question, str): + inputs = {"question": question, "image": image, "word_boxes": word_boxes} + else: + inputs = image + return super().__call__(inputs, **kwargs) + + def preprocess( + self, + input, + padding="max_length", + doc_stride=None, + max_question_len=64, + max_seq_len=None, + word_boxes: Tuple[str, List[float]] = None, + ): + # NOTE: This code mirrors the code is question_answering.py + if max_seq_len is None: + # TODO: LayoutLM's stride is 512 by default. Is it ok to use that as the min + # instead of 384? 
+ max_seq_len = min(self.tokenizer.model_max_length, 512) + if doc_stride is None: + doc_stride = min(max_seq_len // 2, 128) + + # TODO: Run OCR on the image if words is None + # I'll remove this assert once I implement OCR + assert input["word_boxes"], "This should be fixed and replaced with OCR" + + words = [x[0] for x in input["word_boxes"]] + boxes = [x[1] for x in input["word_boxes"]] + + encoding = self.tokenizer( + text=[input["question"]], + text_pair=words, + padding=padding, + truncation="only_second", + max_length=max_seq_len, + stride=doc_stride, + return_token_type_ids=True, + return_overflowing_tokens=True, + is_split_into_words=True, + # TODO: We should remove this if we want to remove the default padding + # and do an unsqueeze like the QA pipeline + return_tensors=self.framework, + ) + + num_spans = len(encoding["input_ids"]) + + bbox = [] + for batch_index in range(num_spans): + for i, s, w in zip( + encoding.input_ids[batch_index], + encoding.sequence_ids(batch_index), + encoding.word_ids(batch_index), + ): + if s == 1: + bbox.append(boxes[w]) + elif i == self.tokenizer.sep_token_id: + bbox.append([1000] * 4) + else: + bbox.append([0] * 4) + + if self.framework == "tf": + raise ValueError("Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + elif self.framework == "pt": + encoding["bbox"] = torch.tensor([bbox]) + + word_ids = [encoding.word_ids(i) for i in range(num_spans)] + + encoding.pop("overflow_to_sample_mapping", None) + return { + **encoding, + "word_ids": word_ids, + "words": words, + } + + def _forward(self, model_inputs): + word_ids = model_inputs.pop("word_ids", None) + words = model_inputs.pop("words", None) + + model_outputs = self.model(**model_inputs) + + model_outputs["word_ids"] = word_ids + model_outputs["words"] = words + return model_outputs + + def postprocess(self, model_outputs, top_k=5): + return postprocess_qa_output( + self.model, + model_outputs, + model_outputs["word_ids"], + model_outputs["words"], + self.framework, + top_k, + ) diff --git a/src/transformers/pipelines/visual_question_answering.py b/src/transformers/pipelines/visual_question_answering.py index 6987db8706241d..34a7a3b10d40fe 100644 --- a/src/transformers/pipelines/visual_question_answering.py +++ b/src/transformers/pipelines/visual_question_answering.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Union +from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline @@ -10,51 +10,11 @@ from ..image_utils import load_image if is_torch_available(): - import torch - from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING logger = logging.get_logger(__name__) -def postprocess_sequence_output(model, model_outputs, framework, top_k): - if top_k > model.config.num_labels: - top_k = model.config.num_labels - - if framework == "pt": - probs = model_outputs.logits.sigmoid()[0] - scores, ids = probs.topk(top_k) - else: - raise ValueError(f"Unsupported framework: {framework}") - - scores = scores.tolist() - ids = ids.tolist() - return [{"score": score, "answer": model.config.id2label[_id]} for score, _id in zip(scores, ids)] - - -def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_k): - # TODO: Test to make sure this works with tensorflow too (or break on the framework) - - # TODO: This is a very poor implementation of start/end (just here for completeness sake). 
- # Ideally we can refactor/borrow the implementation in the question answering pipeline. - results = [] - for i, (s, e) in enumerate(zip(model_outputs.start_logits.argmax(-1), model_outputs.end_logits.argmax(-1))): - if s > e: - continue - else: - word_start, word_end = word_ids[i][s], word_ids[i][e] - results.append( - { - "score": 0.5, # TODO - "answer": " ".join(words[word_start : word_end + 1]), - "start": word_start, - "end": word_end, - } - ) - - return results - - @add_end_docstrings(PIPELINE_INIT_ARGS) class VisualQuestionAnsweringPipeline(Pipeline): """ @@ -73,7 +33,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING) - def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, words=None, boxes=None, **kwargs): + def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding @@ -81,20 +41,9 @@ def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, words= preprocess_params["truncation"] = truncation if top_k is not None: postprocess_params["top_k"] = top_k - if words is not None or boxes is not None: - if words is None or boxes is None: - raise ValueError("Must provide both words and boxes if providing either") - preprocess_params["words"] = words - preprocess_params["boxes"] = boxes - return preprocess_params, {}, postprocess_params - def __call__( - self, - image: [Optional[Union["Image.Image", str]]] = None, - question: Optional[Union[List[str], str]] = None, - **kwargs, - ): + def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs): r""" Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below: @@ -125,10 +74,8 @@ def __call__( - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. """ - if (isinstance(image, (Image.Image, str)) or image is None) and isinstance(question, str): - inputs = {"question": question} - if image is not None: - inputs["image"] = image + if isinstance(image, (Image.Image, str)) and isinstance(question, str): + inputs = {"image": image, "question": question} else: """ Supports the following format @@ -140,109 +87,29 @@ def __call__( results = super().__call__(inputs, **kwargs) return results - def preprocess(self, inputs, padding=False, truncation=False, words=[], boxes=[]): - image_features = {} - if "image" in inputs: - image = load_image(inputs["image"]) - image_features = self.feature_extractor(images=image, return_tensors=self.framework) - - # TODO: If the image is specified, depending on the tokenizer, we should update inputs - # with the words/bounding boxes. - # - # TODO: LayoutLMv1 does not come with a feature extractor that can run OCR, but v2 and v3 do. - # We could either (a) require users of v1 pass in words (or implement a v1 feature extractor) - # or (b) build the OCR implementation into this pipeline (and expect users of v2 and v3 to - # instantiate the pipeline with feature extractors that have OCR disabled). 
- - if not ("image" in inputs or "words" in inputs): - raise ValueError("Must provide at least one of an image or words/bounding boxes") - - question = inputs["question"] - text_pair = None - - extra_tokenizer_params = {} - if "words" in inputs: - # TODO: Can we refactor and share (or inherit from) the QuestionAnweringPipeline? I think we want to do - # the same thing with the tokenizer, except we have a few extra arguments (e.g. the bounding boxes). - extra_tokenizer_params = { - "return_token_type_ids": True, - "return_attention_mask": True, - "is_split_into_words": True, - # "max_length": 512, - } - padding = "max_length" - truncation = True - question = [question] - text_pair = inputs["words"] - - encoding = self.tokenizer( - text=question, - text_pair=text_pair, - return_tensors=self.framework, - padding=padding, - truncation=truncation, - **extra_tokenizer_params, + def preprocess(self, inputs, padding=False, truncation=False): + image = load_image(inputs["image"]) + model_inputs = self.tokenizer( + inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation ) + image_features = self.feature_extractor(images=image, return_tensors=self.framework) + model_inputs.update(image_features) + return model_inputs - num_spans = len(encoding["input_ids"]) - - if "boxes" in inputs: - boxes = inputs["boxes"] - bbox = [] - for batch_index in range(num_spans): - for i, s, w in zip( - encoding.input_ids[batch_index], - encoding.sequence_ids(batch_index), - encoding.word_ids(batch_index), - ): - if s == 1: - bbox.append(boxes[w]) - elif i == self.tokenizer.sep_token_id: - bbox.append([1000] * 4) - else: - bbox.append([0] * 4) - - if self.framework == "tf": - # TODO implement - pass - elif self.framework == "pt": - encoding["bbox"] = torch.tensor([bbox]) - - # TODO: Handle multiple spans. We'll basically want to duplicate the image features for each span - # (we can then also remove this assert) - assert len(image_features) == 0 or list(image_features.items())[0].size(0) == list(encoding.items())[0].size(0) - encoding.update(image_features) - - # TODO: I think it's cleaner to place the encoding and other context in the dict in separate keys - # instead of flat at the top level. I'm happy to undo this though if it's not in line with - # other parts of the code. - return { - "encoding": encoding, - "word_ids": [encoding.word_ids(i) for i in range(len(encoding["input_ids"]))], - "sequence_ids": [encoding.sequence_ids(i) for i in range(len(encoding["input_ids"]))], - "inputs": inputs, - } - - def _forward(self, inputs): - model_outputs = self.model(**inputs["encoding"]) - return {"outputs": model_outputs, "inputs": inputs} - - def postprocess(self, result, top_k=5): - model_outputs = result["outputs"] - - # TODO: Is there a better way to do this? 
I tried using - # isinstance(model_outputs, SequenceClassifierOutput) but that thinks model_outputs - # is transformers.utils.generic.ModelOutput - if "logits" in model_outputs: - return postprocess_sequence_output(self.model, model_outputs, self.framework, top_k) - elif "start_logits" in model_outputs and "end_logits" in model_outputs: - return postprocess_qa_output( - self.model, - model_outputs, - result["inputs"]["word_ids"], - result["inputs"]["inputs"]["words"], - self.framework, - top_k, - ) + def _forward(self, model_inputs): + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, top_k=5): + if top_k > self.model.config.num_labels: + top_k = self.model.config.num_labels + + if self.framework == "pt": + probs = model_outputs.logits.sigmoid()[0] + scores, ids = probs.topk(top_k) else: - assert False, "Unknown output format" + raise ValueError(f"Unsupported framework: {self.framework}") + + scores = scores.tolist() + ids = ids.tolist() + return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index cce3c9b3f48615..b04d68a626edb5 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -39,6 +39,7 @@ ) + class LayoutLMModelTester: """You can also import this e.g from .test_modeling_layoutlm import LayoutLMModelTester""" From 56618d01b66b0a76600ae02b937037532d85c1ed Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 3 Aug 2022 23:00:03 -0700 Subject: [PATCH 05/34] Cleanup --- src/transformers/pipelines/__init__.py | 4 ++-- src/transformers/pipelines/document_question_answering.py | 3 ++- tests/models/layoutlm/test_modeling_layoutlm.py | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index f521a054221443..50c8d3f49d3e48 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -51,6 +51,7 @@ infer_framework_load_model, ) from .conversational import Conversation, ConversationalPipeline +from .document_question_answering import DocumentQuestionAnsweringPipeline from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline from .image_classification import ImageClassificationPipeline @@ -69,7 +70,6 @@ TokenClassificationPipeline, ) from .visual_question_answering import VisualQuestionAnsweringPipeline -from .document_question_answering import DocumentQuestionAnsweringPipeline from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline from .zero_shot_image_classification import ZeroShotImageClassificationPipeline @@ -110,6 +110,7 @@ AutoModelForAudioClassification, AutoModelForCausalLM, AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForMaskedLM, @@ -123,7 +124,6 @@ AutoModelForTokenClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, - AutoModelForDocumentQuestionAnswering, ) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index c2d5b3c5778fda..374be6203dcffa 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ 
b/src/transformers/pipelines/document_question_answering.py @@ -7,7 +7,8 @@ if is_vision_available(): from PIL import Image - from ..image_utils import load_image + # TODO Will re-introduce when I add images back in + # from ..image_utils import load_image if is_torch_available(): import torch diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index b04d68a626edb5..cce3c9b3f48615 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -39,7 +39,6 @@ ) - class LayoutLMModelTester: """You can also import this e.g from .test_modeling_layoutlm import LayoutLMModelTester""" From 229920a600670a5fad17b3fee3cf1400c554bd63 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Thu, 4 Aug 2022 22:34:16 -0700 Subject: [PATCH 06/34] Improve args, docs, and implement preprocessing --- .../pipelines/document_question_answering.py | 131 +++++++++++++++--- 1 file changed, 115 insertions(+), 16 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 374be6203dcffa..f0fd454d256894 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -48,6 +48,19 @@ def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_ @add_end_docstrings(PIPELINE_INIT_ARGS) class DocumentQuestionAnsweringPipeline(Pipeline): + # TODO: Update task_summary docs to include an example with document QA and then update the first sentence + """ + Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. See the [question answering + examples](../task_summary#question-answering) for more information. + + This document question answering pipeline can currently be loaded from [`pipeline`] using the following task + identifier: `"document-question-answering"`. + + The models that this pipeline can use are models that have been fine-tuned on a document question answering task. + See the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). 
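
    Example (illustrative only; the checkpoint below matches the `checkpoint_for_example` used for
    `AutoModelForDocumentQuestionAnswering` and is assumed to be fine-tuned for document QA, and `invoice.png`
    is a placeholder path):

    ```python
    >>> from transformers import pipeline

    >>> document_qa = pipeline("document-question-answering", model="impira/layoutlm-doc-qa")
    >>> document_qa(image="invoice.png", question="What is the invoice number?")
    ```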
+ """ + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) @@ -56,11 +69,14 @@ def __init__(self, *args, **kwargs): def _sanitize_parameters( self, padding=None, - word_boxes=None, # TODO: Maybe rename to just words doc_stride=None, max_question_len=None, + lang: Optional[str] = None, + tesseract_config: Optional[str] = None, + max_answer_len=None, max_seq_len=None, top_k=None, + handle_impossible_answer=None, **kwargs, ): preprocess_params, postprocess_params = {}, {} @@ -72,13 +88,24 @@ def _sanitize_parameters( preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len + if lang is not None: + preprocess_params["lang"] = lang + if tesseract_config is not None: + preprocess_params["tesseract_config"] = tesseract_config if top_k is not None: + if top_k < 1: + raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k + if max_answer_len is not None: + if max_answer_len < 1: + raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") + postprocess_params["max_answer_len"] = max_answer_len + if handle_impossible_answer is not None: + postprocess_params["handle_impossible_answer"] = handle_impossible_answer return preprocess_params, {}, postprocess_params - # TODO: Borrow params from QA pipeline probably def __call__( self, image: Union["Image.Image", str], @@ -86,28 +113,96 @@ def __call__( word_boxes: Tuple[str, List[float]] = None, **kwargs, ): + # TODO: + # - Should we attempt to support a batch of inputs like the question answering pipeline? + # - Implement top-k (may come for free when integrating the QA post processor) + """ + Answer the question(s) given as inputs by using the context(s). The pipeline accepts an image and question, as + well as an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` + are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes + automatically. + + You can invoke the pipeline several ways: + + - `pipeline(image=image, question=question)` + - `pipeline(image=image, question=question, word_boxes=word_boxes)` + - `pipeline([{"image": image, "question": question}])` + - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` + + Args: + image (`str` or `PIL.Image`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. If given a single image, it can be + broadcasted to multiple questions. + question (`str`): + A question to ask of the document. + word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): + A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the + pipeline will use these words and boxes instead of running OCR on the image to derive them. This allows + you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each + time. + top_k (`int`, *optional*, defaults to 1): + The number of answers to return (will be chosen by order of likelihood). Note that we return less than + top_k answers if there are not enough options available within the context. 
+ doc_stride (`int`, *optional*, defaults to 128): + If the words in the document are too long to fit with the question for the model, it will be split in + several chunks with some overlap. This argument controls the size of that overlap. + max_answer_len (`int`, *optional*, defaults to 15): + The maximum length of predicted answers (e.g., only answers with a shorter length are considered). + max_seq_len (`int`, *optional*, defaults to 384): + The maximum length of the total sentence (context + question) in tokens of each chunk passed to the + model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. + max_question_len (`int`, *optional*, defaults to 64): + The maximum length of the question after tokenization. It will be truncated if needed. + handle_impossible_answer (`bool`, *optional*, defaults to `False`): + Whether or not we accept impossible as an answer. + lang (`str`, *optional*): + Language to use while running OCR. Defaults to english. + tesseract_config (`str`, *optional*): + Additional flags to pass to tesseract while running OCR. + + Return: + A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: + + - **score** (`float`) -- The probability associated to the answer. + - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided + `word_boxes`). + - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided + `word_boxes`). + - **answer** (`str`) -- The answer to the question. + """ if isinstance(question, str): inputs = {"question": question, "image": image, "word_boxes": word_boxes} else: inputs = image return super().__call__(inputs, **kwargs) + # TODO: + # 1) Use apply_tesseract from layoutlmv3/feature_extraction_layoutlmv3 def preprocess( self, input, - padding="max_length", + padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None, word_boxes: Tuple[str, List[float]] = None, ): - # NOTE: This code mirrors the code is question_answering.py - if max_seq_len is None: - # TODO: LayoutLM's stride is 512 by default. Is it ok to use that as the min - # instead of 384? - max_seq_len = min(self.tokenizer.model_max_length, 512) - if doc_stride is None: - doc_stride = min(max_seq_len // 2, 128) + # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR + # to support documents with enough tokens that overflow the model's window + # if max_seq_len is None: + # # TODO: LayoutLM's stride is 512 by default. Is it ok to use that as the min + # # instead of 384 (which the QA model uses)? 
+ # max_seq_len = min(self.tokenizer.model_max_length, 512) + + if doc_stride is not None: + raise ValueError("Unsupported: striding inputs") + # doc_stride = min(max_seq_len // 2, 128) # TODO: Run OCR on the image if words is None # I'll remove this assert once I implement OCR @@ -117,22 +212,26 @@ def preprocess( boxes = [x[1] for x in input["word_boxes"]] encoding = self.tokenizer( - text=[input["question"]], + text=input["question"].split(), text_pair=words, padding=padding, - truncation="only_second", max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, - return_overflowing_tokens=True, is_split_into_words=True, - # TODO: We should remove this if we want to remove the default padding - # and do an unsqueeze like the QA pipeline return_tensors=self.framework, + # TODO: In a future PR, use these feature to handle sequences whose length is longer than + # the maximum allowed by the model. Currently, the tokenizer will produce a sequence that + # may be too long for the model to handle. + # truncation="only_second", + # return_overflowing_tokens=True, ) + # TODO: For now, this should always be num_spans=1 given the flags we've passed in above num_spans = len(encoding["input_ids"]) + # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] + # for SEP tokens, and the word's bounding box for words in the original document. bbox = [] for batch_index in range(num_spans): for i, s, w in zip( @@ -148,7 +247,7 @@ def preprocess( bbox.append([0] * 4) if self.framework == "tf": - raise ValueError("Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") elif self.framework == "pt": encoding["bbox"] = torch.tensor([bbox]) From 0e3908039a9c93b2b057e51b23716be58a57a367 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Fri, 5 Aug 2022 12:27:33 -0700 Subject: [PATCH 07/34] Integrate OCR --- .../pipelines/document_question_answering.py | 97 +++++++++++++++---- 1 file changed, 80 insertions(+), 17 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index f0fd454d256894..db183d1df0ba63 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -1,6 +1,6 @@ from typing import List, Optional, Tuple, Union -from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging +from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline @@ -8,24 +8,66 @@ from PIL import Image # TODO Will re-introduce when I add images back in - # from ..image_utils import load_image + from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING +TESSERACT_LOADED = False +if is_pytesseract_available(): + TESSERACT_LOADED = True + import pytesseract logger = logging.get_logger(__name__) -# TODO: -# 1. Should we make this a chunk pipeline for consistency with QAPipeline? -# 2. Should we switch padding default to "do_not_pad" and do the same "unsqueeze" trick as the qa pipeline? +# normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. 
+# However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an +# unecessary dependency. +def normalize_box(box, width, height): + return [ + int(1000 * (box[0] / width)), + int(1000 * (box[1] / height)), + int(1000 * (box[2] / width)), + int(1000 * (box[3] / height)), + ] -def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_k): - # TODO: Test to make sure this works with tensorflow too (or break on the framework) +def apply_tesseract(image: Image.Image, lang: Optional[str], tesseract_config: Optional[str]): + """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" + # apply OCR + data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) + words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] + + # filter empty words and corresponding coordinates + irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] + words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] + left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] + top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] + width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] + height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] + + # turn coordinates into (left, top, left+width, top+height) format + actual_boxes = [] + for x, y, w, h in zip(left, top, width, height): + actual_box = [x, y, x + w, y + h] + actual_boxes.append(actual_box) + + image_width, image_height = image.size + + # finally, normalize the bounding boxes + normalized_boxes = [] + for box in actual_boxes: + normalized_boxes.append(normalize_box(box, image_width, image_height)) + + assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes" + + return words, normalized_boxes + + +def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_k): # TODO: This is a very poor implementation of start/end (just here for completeness sake). # Ideally we can refactor/borrow the implementation in the question answering pipeline. 
results = [] @@ -65,7 +107,6 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) - # TODO: Borrow params from QA pipeline probably def _sanitize_parameters( self, padding=None, @@ -182,8 +223,6 @@ def __call__( inputs = image return super().__call__(inputs, **kwargs) - # TODO: - # 1) Use apply_tesseract from layoutlmv3/feature_extraction_layoutlmv3 def preprocess( self, input, @@ -192,6 +231,8 @@ def preprocess( max_question_len=64, max_seq_len=None, word_boxes: Tuple[str, List[float]] = None, + lang=None, + tesseract_config="", ): # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR # to support documents with enough tokens that overflow the model's window @@ -204,13 +245,35 @@ def preprocess( raise ValueError("Unsupported: striding inputs") # doc_stride = min(max_seq_len // 2, 128) - # TODO: Run OCR on the image if words is None - # I'll remove this assert once I implement OCR - assert input["word_boxes"], "This should be fixed and replaced with OCR" - - words = [x[0] for x in input["word_boxes"]] - boxes = [x[1] for x in input["word_boxes"]] + image = None + image_features = {} + if "image" in input: + image = load_image(input["image"]) + if self.feature_extractor is not None: + image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) + + words, boxes = None, None + if "word_boxes" in input: + words = [x[0] for x in input["word_boxes"]] + boxes = [x[1] for x in input["word_boxes"]] + elif "words" in image_features and "boxes" in image_features: + words = image_features.pop("words") + boxes = image_features.pop("boxes") + elif image is not None: + if not TESSERACT_LOADED: + raise ValueError( + "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract, but" + " pytesseract is not available" + ) + words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) + else: + raise ValueError( + "You must provide an image or word_boxes. If you provide an image, the pipeline will automatically run" + " OCR to derive words and boxes" + ) + # TODO: The safe way to do this is to call the tokenizer in succession on each token and insert the CLS/SEP + # tokens ourselves. 
encoding = self.tokenizer( text=input["question"].split(), text_pair=words, @@ -227,7 +290,7 @@ def preprocess( # return_overflowing_tokens=True, ) - # TODO: For now, this should always be num_spans=1 given the flags we've passed in above + # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above num_spans = len(encoding["input_ids"]) # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] From 355ddc9d8c22d9f62c44c97929a1d1cd347d545a Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 6 Aug 2022 13:32:22 -0700 Subject: [PATCH 08/34] Refactor question_answering pipeline --- .../pipelines/document_question_answering.py | 23 +++ .../pipelines/question_answering.py | 184 +++++++++++------- 2 files changed, 132 insertions(+), 75 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index db183d1df0ba63..a1a17dbee98cfb 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -1,5 +1,7 @@ from typing import List, Optional, Tuple, Union +import numpy as np + from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline @@ -272,6 +274,12 @@ def preprocess( " OCR to derive words and boxes" ) + if self.tokenizer.padding_side != "right": + raise ValueError( + "Document question answering only supports tokenizers whose padding side is 'right', not" + f" {self.tokenizer.padding_side}" + ) + # TODO: The safe way to do this is to call the tokenizer in succession on each token and insert the CLS/SEP # tokens ourselves. encoding = self.tokenizer( @@ -293,6 +301,17 @@ def preprocess( # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above num_spans = len(encoding["input_ids"]) + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) + p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] + for span_idx in range(num_spans): + input_ids_span_idx = encoding["input_ids"][span_idx] + # keep the cls_token unmasked (some models use it to indicate unanswerable questions) + if self.tokenizer.cls_token_id is not None: + cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] + for cls_index in cls_indices: + p_mask[span_idx][cls_index] = 0 + # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] # for SEP tokens, and the word's bounding box for words in the original document. 
bbox = [] @@ -319,18 +338,22 @@ def preprocess( encoding.pop("overflow_to_sample_mapping", None) return { **encoding, + "p_mask": p_mask, "word_ids": word_ids, "words": words, } def _forward(self, model_inputs): + p_mask = model_inputs.pop("p_mask", None) word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) model_outputs = self.model(**model_inputs) + model_outputs["p_mask"] = p_mask model_outputs["word_ids"] = word_ids model_outputs["words"] = words + model_outputs["attention_mask"] = model_inputs["attention_mask"] return model_outputs def postprocess(self, model_outputs, top_k=5): diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 918931a2cac47e..5148f467c9ad53 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -42,6 +42,110 @@ from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING +def decode_spans( + start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray +) -> Tuple: + """ + Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the actual + answer. + + In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or + answer end position being before the starting position. The method supports output the k-best answer through the + topk argument. + + Args: + start (`np.ndarray`): Individual start probabilities for each token. + end (`np.ndarray`): Individual end probabilities for each token. + topk (`int`): Indicates how many possible answer span(s) to extract from the model output. + max_answer_len (`int`): Maximum size of the answer to extract from the model's output. + undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer + """ + # Ensure we have batch axis + if start.ndim == 1: + start = start[None] + + if end.ndim == 1: + end = end[None] + + # Compute the score of each tuple(start, end) to be the real answer + outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) + + # Remove candidate with end < start and end - start > max_answer_len + candidates = np.tril(np.triu(outer), max_answer_len - 1) + + # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA) + scores_flat = candidates.flatten() + if topk == 1: + idx_sort = [np.argmax(scores_flat)] + elif len(scores_flat) < topk: + idx_sort = np.argsort(-scores_flat) + else: + idx = np.argpartition(-scores_flat, topk)[0:topk] + idx_sort = idx[np.argsort(-scores_flat[idx])] + + starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] + desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) + starts = starts[desired_spans] + ends = ends[desired_spans] + scores = candidates[0, starts, ends] + + return starts, ends, scores + + +def select_starts_ends( + start, + end, + p_mask, + attention_mask, + min_null_score=1000000, + top_k=1, + handle_impossible_answer=False, + max_answer_len=15, +): + """ + Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses + `decode_spans()` to generate probabilities for each span to be the actual answer. + + Args: + start (`np.ndarray`): Individual start probabilities for each token. + end (`np.ndarray`): Individual end probabilities for each token. 
+ p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer + attention_mask (`np.ndarray`): The attention mask generated by the tokenizer + min_null_score(`float`): The minimum null (empty) answer score seen so far. + top_k (`int`): Indicates how many possible answer span(s) to extract from the model output. + handle_impossible_answer (`bool`): Whether to allow null (empty) answers + max_answer_len (`int`): Maximum size of the answer to extract from the model's output. + """ + # Ensure padded tokens & question tokens cannot belong to the set of candidate answers. + undesired_tokens = np.abs(np.array(p_mask) - 1) + + if attention_mask is not None: + undesired_tokens = undesired_tokens & attention_mask + + # Generate mask + undesired_tokens_mask = undesired_tokens == 0.0 + + # Make sure non-context indexes in the tensor cannot contribute to the softmax + start = np.where(undesired_tokens_mask, -10000.0, start) + end = np.where(undesired_tokens_mask, -10000.0, end) + + # Normalize logits and spans to retrieve the answer + start = np.exp(start - start.max(axis=-1, keepdims=True)) + start = start / start.sum() + + end = np.exp(end - end.max(axis=-1, keepdims=True)) + end = end / end.sum() + + if handle_impossible_answer: + min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item()) + + # Mask CLS + start[0, 0] = end[0, 0] = 0.0 + + starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens) + return starts, ends, scores, min_null_score + + + class QuestionAnsweringArgumentHandler(ArgumentHandler): """ QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to @@ -410,34 +514,13 @@ def postprocess( start_ = output["start"] end_ = output["end"] example = output["example"] + p_mask = output["p_mask"] + attention_mask = output["attention_mask"].numpy() if "attention_mask" in output else None - # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
- undesired_tokens = np.abs(np.array(output["p_mask"]) - 1) - - if output.get("attention_mask", None) is not None: - undesired_tokens = undesired_tokens & output["attention_mask"].numpy() - - # Generate mask - undesired_tokens_mask = undesired_tokens == 0.0 - - # Make sure non-context indexes in the tensor cannot contribute to the softmax - start_ = np.where(undesired_tokens_mask, -10000.0, start_) - end_ = np.where(undesired_tokens_mask, -10000.0, end_) - - # Normalize logits and spans to retrieve the answer - start_ = np.exp(start_ - start_.max(axis=-1, keepdims=True)) - start_ = start_ / start_.sum() - - end_ = np.exp(end_ - end_.max(axis=-1, keepdims=True)) - end_ = end_ / end_.sum() - - if handle_impossible_answer: - min_null_score = min(min_null_score, (start_[0, 0] * end_[0, 0]).item()) - - # Mask CLS - start_[0, 0] = end_[0, 0] = 0.0 + starts, ends, scores, min_null_score = select_starts_ends( + start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len + ) - starts, ends, scores = self.decode(start_, end_, top_k, max_answer_len, undesired_tokens) if not self.tokenizer.is_fast: char_to_word = np.array(example.char_to_word_offset) @@ -518,55 +601,6 @@ def get_indices( end_index = enc.offsets[e][1] return start_index, end_index - def decode( - self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray - ) -> Tuple: - """ - Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the - actual answer. - - In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or - answer end position being before the starting position. The method supports output the k-best answer through - the topk argument. - - Args: - start (`np.ndarray`): Individual start probabilities for each token. - end (`np.ndarray`): Individual end probabilities for each token. - topk (`int`): Indicates how many possible answer span(s) to extract from the model output. - max_answer_len (`int`): Maximum size of the answer to extract from the model's output. - undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer - """ - # Ensure we have batch axis - if start.ndim == 1: - start = start[None] - - if end.ndim == 1: - end = end[None] - - # Compute the score of each tuple(start, end) to be the real answer - outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) - - # Remove candidate with end < start and end - start > max_answer_len - candidates = np.tril(np.triu(outer), max_answer_len - 1) - - # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA) - scores_flat = candidates.flatten() - if topk == 1: - idx_sort = [np.argmax(scores_flat)] - elif len(scores_flat) < topk: - idx_sort = np.argsort(-scores_flat) - else: - idx = np.argpartition(-scores_flat, topk)[0:topk] - idx_sort = idx[np.argsort(-scores_flat[idx])] - - starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] - desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) - starts = starts[desired_spans] - ends = ends[desired_spans] - scores = candidates[0, starts, ends] - - return starts, ends, scores - def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]: """ When decoding from token probabilities, this method maps token indexes to actual word in the initial context. 
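As an illustration (not part of the patch itself), here is a minimal sketch of how the `select_starts_ends` helper introduced by the refactor above can be called, assuming it is importable from `transformers.pipelines.question_answering` once the patch is applied; the logits, p_mask and attention_mask values below are toy numbers made up for the example:

    import numpy as np
    from transformers.pipelines.question_answering import select_starts_ends

    # Toy start/end logits for a single 6-token sequence (batch axis included).
    start_logits = np.array([[0.1, 0.2, 5.0, 0.3, 0.1, 0.1]])
    end_logits = np.array([[0.1, 0.2, 0.3, 4.0, 0.1, 0.1]])

    # p_mask flags tokens that can never be part of the answer (question and special
    # tokens); here only tokens 2-4 are answerable context tokens.
    p_mask = np.array([[1, 1, 0, 0, 0, 1]])
    attention_mask = np.array([[1, 1, 1, 1, 1, 1]])

    starts, ends, scores, min_null_score = select_starts_ends(
        start_logits, end_logits, p_mask, attention_mask, top_k=1, max_answer_len=5
    )
    # With these toy values the single best span is tokens 2..3 (starts == [2], ends == [3]),
    # and scores holds its normalized start*end probability. min_null_score is only updated
    # when handle_impossible_answer=True.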
From afdbdaa1fc44c9d679904536b32509a999609ad4 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 6 Aug 2022 13:56:05 -0700 Subject: [PATCH 09/34] Use refactored QA code in the document qa pipeline --- .../pipelines/document_question_answering.py | 49 +++++++++++++++---- 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index a1a17dbee98cfb..ab17f2c7d8e08d 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -4,6 +4,7 @@ from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline +from .question_answering import select_starts_ends if is_vision_available(): @@ -156,9 +157,6 @@ def __call__( word_boxes: Tuple[str, List[float]] = None, **kwargs, ): - # TODO: - # - Should we attempt to support a batch of inputs like the question answering pipeline? - # - Implement top-k (may come for free when integrating the QA post processor) """ Answer the question(s) given as inputs by using the context(s). The pipeline accepts an image and question, as well as an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` @@ -356,12 +354,43 @@ def _forward(self, model_inputs): model_outputs["attention_mask"] = model_inputs["attention_mask"] return model_outputs - def postprocess(self, model_outputs, top_k=5): - return postprocess_qa_output( - self.model, - model_outputs, - model_outputs["word_ids"], - model_outputs["words"], - self.framework, + def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15): + min_null_score = 1000000 # large and positive + answers = [] + words = model_outputs["words"] + + # Currently, we expect the length of model_outputs to be 1, because we do not stride + # in the preprocessor code. 
But this code is written generally (like the question_answering + # pipeline) to support that scenario + starts, ends, scores, min_null_score = select_starts_ends( + model_outputs["start_logits"], + model_outputs["end_logits"], + model_outputs["p_mask"], + model_outputs["attention_mask"].numpy() if "attention_mask" in model_outputs else None, + min_null_score, top_k, + handle_impossible_answer, + max_answer_len, ) + + word_ids = model_outputs["word_ids"][0] + for s, e, score in zip(starts, ends, scores): + word_start, word_end = word_ids[s], word_ids[e] + answers.append( + { + "score": score, + "answer": " ".join(words[word_start : word_end + 1]), + "start": word_start, + "end": word_end, + } + ) + + print(handle_impossible_answer) + if handle_impossible_answer: + answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) + print(answers[-1]) + + answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] + if len(answers) == 1: + return answers[0] + return answers From 33933954b111c1853a93cdd2758ce012e02c36ac Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 6 Aug 2022 15:18:25 -0700 Subject: [PATCH 10/34] Fix tests --- src/transformers/pipelines/document_question_answering.py | 2 +- src/transformers/pipelines/question_answering.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index ab17f2c7d8e08d..5c939ec47e52a7 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -366,7 +366,7 @@ def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, ma model_outputs["start_logits"], model_outputs["end_logits"], model_outputs["p_mask"], - model_outputs["attention_mask"].numpy() if "attention_mask" in model_outputs else None, + model_outputs["attention_mask"].numpy() if model_outputs.get("attention_mask", None) is not None else None, min_null_score, top_k, handle_impossible_answer, diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 5148f467c9ad53..8fd34408f5f7b1 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -515,7 +515,9 @@ def postprocess( end_ = output["end"] example = output["example"] p_mask = output["p_mask"] - attention_mask = output["attention_mask"].numpy() if "attention_mask" in output else None + attention_mask = ( + output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None + ) starts, ends, scores, min_null_score = select_starts_ends( start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len From fe83056d4e1be28b989355cee574a8c96f92c84d Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 6 Aug 2022 15:36:50 -0700 Subject: [PATCH 11/34] Some small cleanups --- .../pipelines/document_question_answering.py | 49 ++++++------------- 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 5c939ec47e52a7..eb89f0591fd75d 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -70,27 +70,6 @@ def apply_tesseract(image: Image.Image, lang: Optional[str], tesseract_config: O return words, 
normalized_boxes -def postprocess_qa_output(model, model_outputs, word_ids, words, framework, top_k): - # TODO: This is a very poor implementation of start/end (just here for completeness sake). - # Ideally we can refactor/borrow the implementation in the question answering pipeline. - results = [] - for i, (s, e) in enumerate(zip(model_outputs.start_logits.argmax(-1), model_outputs.end_logits.argmax(-1))): - if s > e: - continue - else: - word_start, word_end = word_ids[i][s], word_ids[i][e] - results.append( - { - "score": 0.5, # TODO - "answer": " ".join(words[word_start : word_end + 1]), - "start": word_start, - "end": word_end, - } - ) - - return results - - @add_end_docstrings(PIPELINE_INIT_ARGS) class DocumentQuestionAnsweringPipeline(Pipeline): # TODO: Update task_summary docs to include an example with document QA and then update the first sentence @@ -158,10 +137,9 @@ def __call__( **kwargs, ): """ - Answer the question(s) given as inputs by using the context(s). The pipeline accepts an image and question, as - well as an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` - are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes - automatically. + Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an + optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not + provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically. You can invoke the pipeline several ways: @@ -242,8 +220,9 @@ def preprocess( # max_seq_len = min(self.tokenizer.model_max_length, 512) if doc_stride is not None: - raise ValueError("Unsupported: striding inputs") + # TODO implement # doc_stride = min(max_seq_len // 2, 128) + raise ValueError("Unsupported: striding inputs") image = None image_features = {} @@ -278,8 +257,6 @@ def preprocess( f" {self.tokenizer.padding_side}" ) - # TODO: The safe way to do this is to call the tokenizer in succession on each token and insert the CLS/SEP - # tokens ourselves. encoding = self.tokenizer( text=input["question"].split(), text_pair=words, @@ -296,11 +273,13 @@ def preprocess( # return_overflowing_tokens=True, ) - # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above + # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above, but the + # code is written to naturally handle multiple spans at the right time. 
num_spans = len(encoding["input_ids"]) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) + # This logic mirrors the logic in the question_answering pipeline p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] for span_idx in range(num_spans): input_ids_span_idx = encoding["input_ids"][span_idx] @@ -333,7 +312,9 @@ def preprocess( word_ids = [encoding.word_ids(i) for i in range(num_spans)] - encoding.pop("overflow_to_sample_mapping", None) + # TODO This will be necessary when we implement overflow support + # encoding.pop("overflow_to_sample_mapping", None) + return { **encoding, "p_mask": p_mask, @@ -359,9 +340,9 @@ def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, ma answers = [] words = model_outputs["words"] - # Currently, we expect the length of model_outputs to be 1, because we do not stride - # in the preprocessor code. But this code is written generally (like the question_answering - # pipeline) to support that scenario + # TODO: Currently, we expect the length of model_outputs to be 1, because we do not stride + # in the preprocessor code. When we implement that, we'll either need to handle tensors of size + # > 1 or use the ChunkPipeline and handle multiple outputs (each of size = 1). starts, ends, scores, min_null_score = select_starts_ends( model_outputs["start_logits"], model_outputs["end_logits"], @@ -385,10 +366,8 @@ def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, ma } ) - print(handle_impossible_answer) if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) - print(answers[-1]) answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] if len(answers) == 1: From 391f98dadbad6cc97acbd70019c4891e228da17d Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 6 Aug 2022 15:44:24 -0700 Subject: [PATCH 12/34] Use a string type annotation for Image.Image --- src/transformers/pipelines/document_question_answering.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index eb89f0591fd75d..19c26fafe48f6a 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -10,7 +10,6 @@ if is_vision_available(): from PIL import Image - # TODO Will re-introduce when I add images back in from ..image_utils import load_image if is_torch_available(): @@ -38,7 +37,7 @@ def normalize_box(box, width, height): ] -def apply_tesseract(image: Image.Image, lang: Optional[str], tesseract_config: Optional[str]): +def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) From 7f67d9239c2471b638fc1ea103c6ac167532591a Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 6 Aug 2022 16:47:16 -0700 Subject: [PATCH 13/34] Update encoding with image features --- src/transformers/pipelines/document_question_answering.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/pipelines/document_question_answering.py 
b/src/transformers/pipelines/document_question_answering.py index 19c26fafe48f6a..ba367b8cd29ece 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -271,6 +271,7 @@ def preprocess( # truncation="only_second", # return_overflowing_tokens=True, ) + encoding.update(image_features) # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above, but the # code is written to naturally handle multiple spans at the right time. From 27790d7d97beb2fe10de7841c7cc60b5f67d5314 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sun, 7 Aug 2022 09:48:51 -0700 Subject: [PATCH 14/34] Wire through the basic docs --- docs/source/en/main_classes/pipelines.mdx | 7 +++++++ docs/source/en/model_doc/auto.mdx | 4 ++++ src/transformers/__init__.py | 4 ++++ src/transformers/models/auto/__init__.py | 2 ++ src/transformers/utils/dummy_pt_objects.py | 7 +++++++ 5 files changed, 24 insertions(+) diff --git a/docs/source/en/main_classes/pipelines.mdx b/docs/source/en/main_classes/pipelines.mdx index b2de7e048dd5aa..4043a00009e22d 100644 --- a/docs/source/en/main_classes/pipelines.mdx +++ b/docs/source/en/main_classes/pipelines.mdx @@ -25,6 +25,7 @@ There are two categories of pipeline abstractions to be aware about: - [`AudioClassificationPipeline`] - [`AutomaticSpeechRecognitionPipeline`] - [`ConversationalPipeline`] + - [`DocumentQuestionAnsweringPipeline`] - [`FeatureExtractionPipeline`] - [`FillMaskPipeline`] - [`ImageClassificationPipeline`] @@ -342,6 +343,12 @@ That should enable you to do all the custom code you want. - __call__ - all +### DocumentQuestionAnsweringPipeline + +[[autodoc]] DocumentQuestionAnsweringPipeline + - __call__ + - all + ### FeatureExtractionPipeline [[autodoc]] FeatureExtractionPipeline diff --git a/docs/source/en/model_doc/auto.mdx b/docs/source/en/model_doc/auto.mdx index 995296485b9402..af73c13d4660e5 100644 --- a/docs/source/en/model_doc/auto.mdx +++ b/docs/source/en/model_doc/auto.mdx @@ -114,6 +114,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] AutoModelForTableQuestionAnswering +## AutoModelForDocumentQuestionAnswering + +[[autodoc]] AutoModelForDocumentQuestionAnswering + ## AutoModelForImageClassification [[autodoc]] AutoModelForImageClassification diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 4651c3b5b9e921..e190ae960ae221 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -383,6 +383,7 @@ "Conversation", "ConversationalPipeline", "CsvPipelineDataFormat", + "DocumentQuestionAnsweringPipeline", "FeatureExtractionPipeline", "FillMaskPipeline", "ImageClassificationPipeline", @@ -816,6 +817,7 @@ "AutoModelForAudioXVector", "AutoModelForCausalLM", "AutoModelForCTC", + "AutoModelForDocumentQuestionAnswering", "AutoModelForImageClassification", "AutoModelForImageSegmentation", "AutoModelForInstanceSegmentation", @@ -3200,6 +3202,7 @@ Conversation, ConversationalPipeline, CsvPipelineDataFormat, + DocumentQuestionAnsweringPipeline, FeatureExtractionPipeline, FillMaskPipeline, ImageClassificationPipeline, @@ -3576,6 +3579,7 @@ AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index ec253f6037a3d3..697252464aa3c6 100644 
--- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -93,6 +93,7 @@ "AutoModelForVideoClassification", "AutoModelForVision2Seq", "AutoModelForVisualQuestionAnswering", + "AutoModelForDocumentQuestionAnswering", "AutoModelWithLMHead", ] @@ -218,6 +219,7 @@ AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 32ba979f78b62b..c0e68ab3551720 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -463,6 +463,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class AutoModelForImageClassification(metaclass=DummyObject): _backends = ["torch"] From b71835d17ef9a7e913c342865e81c69b95e57907 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sun, 7 Aug 2022 17:06:10 -0700 Subject: [PATCH 15/34] Handle invalid response --- .../pipelines/document_question_answering.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index ba367b8cd29ece..382cc3bd90a087 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -357,14 +357,15 @@ def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, ma word_ids = model_outputs["word_ids"][0] for s, e, score in zip(starts, ends, scores): word_start, word_end = word_ids[s], word_ids[e] - answers.append( - { - "score": score, - "answer": " ".join(words[word_start : word_end + 1]), - "start": word_start, - "end": word_end, - } - ) + if word_start is not None and word_end is not None: + answers.append( + { + "score": score, + "answer": " ".join(words[word_start : word_end + 1]), + "start": word_start, + "end": word_end, + } + ) if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) From 8a4d8aaa9daf5e6b7d83244ca3c05db0ac378f20 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 8 Aug 2022 06:55:56 -0700 Subject: [PATCH 16/34] Handle empty word_boxes properly --- src/transformers/pipelines/document_question_answering.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 382cc3bd90a087..0928c358149cfd 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -195,7 +195,9 @@ def __call__( - **answer** (`str`) -- The answer to the question. 
""" if isinstance(question, str): - inputs = {"question": question, "image": image, "word_boxes": word_boxes} + inputs = {"question": question, "image": image} + if word_boxes is not None: + inputs["word_boxes"] = word_boxes else: inputs = image return super().__call__(inputs, **kwargs) From 2966e4fb0d77dcb77d32469838b6a7fdd5c2fc07 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 8 Aug 2022 12:03:08 -0700 Subject: [PATCH 17/34] Docstring fix --- src/transformers/pipelines/question_answering.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 8fd34408f5f7b1..6a1a0011c5efc1 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -107,8 +107,8 @@ def select_starts_ends( `decode_spans()` to generate probabilities for each span to be the actual answer. Args: - start (`np.ndarray`): Individual start probabilities for each token. - end (`np.ndarray`): Individual end probabilities for each token. + start (`np.ndarray`): Individual start logits for each token. + end (`np.ndarray`): Individual end logits for each token. p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer attention_mask (`np.ndarray`): The attention mask generated by the tokenizer min_null_score(`float`): The minimum null (empty) answer score seen so far. From e852fc3ff7998a1f211fbf0bcf5281de2ac2aae4 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 13 Aug 2022 12:28:07 -0700 Subject: [PATCH 18/34] Integrate Donut model --- src/transformers/models/auto/modeling_auto.py | 1 + .../pipelines/document_question_answering.py | 213 +++++++++++------- 2 files changed, 132 insertions(+), 82 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 804478efcea40a..98354b6eeb0959 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -612,6 +612,7 @@ ("layoutlm", "LayoutLMForQuestionAnswering"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ("donut-swin", "DonutSwinModel"), ] ) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 0928c358149cfd..17fac3181c0201 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -1,10 +1,12 @@ from typing import List, Optional, Tuple, Union +import re import numpy as np from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline from .question_answering import select_starts_ends +from ..models.vision_encoder_decoder import VisionEncoderDecoderModel if is_vision_available(): @@ -231,26 +233,30 @@ def preprocess( image = load_image(input["image"]) if self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) + elif isinstance(self.model, VisionEncoderDecoderModel): + raise ValueError("If you are using a VisionEncoder model, you must provide a feature extractor") words, boxes = None, None - if "word_boxes" in input: - words = [x[0] for x in input["word_boxes"]] - boxes = [x[1] for x in input["word_boxes"]] - elif "words" in image_features and "boxes" in image_features: - words = 
image_features.pop("words") - boxes = image_features.pop("boxes") - elif image is not None: - if not TESSERACT_LOADED: + if not isinstance(self.model, VisionEncoderDecoderModel): + if "word_boxes" in input: + words = [x[0] for x in input["word_boxes"]] + boxes = [x[1] for x in input["word_boxes"]] + elif "words" in image_features and "boxes" in image_features: + words = image_features.pop("words") + boxes = image_features.pop("boxes") + elif image is not None: + if not TESSERACT_LOADED: + raise ValueError( + "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract, but" + " pytesseract is not available" + ) + if TESSERACT_LOADED: + words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) + else: raise ValueError( - "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract, but" - " pytesseract is not available" + "You must provide an image or word_boxes. If you provide an image, the pipeline will automatically run" + " OCR to derive words and boxes" ) - words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) - else: - raise ValueError( - "You must provide an image or word_boxes. If you provide an image, the pipeline will automatically run" - " OCR to derive words and boxes" - ) if self.tokenizer.padding_side != "right": raise ValueError( @@ -258,64 +264,83 @@ def preprocess( f" {self.tokenizer.padding_side}" ) - encoding = self.tokenizer( - text=input["question"].split(), - text_pair=words, - padding=padding, - max_length=max_seq_len, - stride=doc_stride, - return_token_type_ids=True, - is_split_into_words=True, - return_tensors=self.framework, - # TODO: In a future PR, use these feature to handle sequences whose length is longer than - # the maximum allowed by the model. Currently, the tokenizer will produce a sequence that - # may be too long for the model to handle. - # truncation="only_second", - # return_overflowing_tokens=True, - ) - encoding.update(image_features) - - # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above, but the - # code is written to naturally handle multiple spans at the right time. - num_spans = len(encoding["input_ids"]) - - # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) - # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) - # This logic mirrors the logic in the question_answering pipeline - p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] - for span_idx in range(num_spans): - input_ids_span_idx = encoding["input_ids"][span_idx] - # keep the cls_token unmasked (some models use it to indicate unanswerable questions) - if self.tokenizer.cls_token_id is not None: - cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] - for cls_index in cls_indices: - p_mask[span_idx][cls_index] = 0 - - # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] - # for SEP tokens, and the word's bounding box for words in the original document. 
- bbox = [] - for batch_index in range(num_spans): - for i, s, w in zip( - encoding.input_ids[batch_index], - encoding.sequence_ids(batch_index), - encoding.word_ids(batch_index), - ): - if s == 1: - bbox.append(boxes[w]) - elif i == self.tokenizer.sep_token_id: - bbox.append([1000] * 4) - else: - bbox.append([0] * 4) - - if self.framework == "tf": - raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") - elif self.framework == "pt": - encoding["bbox"] = torch.tensor([bbox]) - - word_ids = [encoding.word_ids(i) for i in range(num_spans)] - - # TODO This will be necessary when we implement overflow support - # encoding.pop("overflow_to_sample_mapping", None) + if isinstance(self.model, VisionEncoderDecoderModel): + task_prompt = f'<s_docvqa><s_question>{input["question"]}</s_question><s_answer>' + # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py + encoding = { + "inputs": image_features["pixel_values"], + "decoder_input_ids": self.tokenizer( + task_prompt, add_special_tokens=False, return_tensors=self.framework + ).input_ids, + "max_length": self.model.decoder.config.max_position_embeddings, + "early_stopping": True, + "pad_token_id": self.tokenizer.pad_token_id, + "eos_token_id": self.tokenizer.eos_token_id, + "use_cache": True, + "num_beams": 1, + "bad_words_ids": [[self.tokenizer.unk_token_id]], + "return_dict_in_generate": True, + } + p_mask = None + word_ids = None + words = None + else: + encoding = self.tokenizer( + text=input["question"].split(), + text_pair=words, + padding=padding, + max_length=max_seq_len, + stride=doc_stride, + return_token_type_ids=True, + is_split_into_words=True, + return_tensors=self.framework, + # TODO: In a future PR, use these feature to handle sequences whose length is longer than + # the maximum allowed by the model. Currently, the tokenizer will produce a sequence that + # may be too long for the model to handle. + # truncation="only_second", + # return_overflowing_tokens=True, + ) + + encoding.update(image_features) + + # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above, but the + # code is written to naturally handle multiple spans at the right time. + num_spans = len(encoding["input_ids"]) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) + # This logic mirrors the logic in the question_answering pipeline + p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] + for span_idx in range(num_spans): + input_ids_span_idx = encoding["input_ids"][span_idx] + # keep the cls_token unmasked (some models use it to indicate unanswerable questions) + if self.tokenizer.cls_token_id is not None: + cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] + for cls_index in cls_indices: + p_mask[span_idx][cls_index] = 0 + + # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] + # for SEP tokens, and the word's bounding box for words in the original document.
+ bbox = [] + for batch_index in range(num_spans): + for i, s, w in zip( + encoding.input_ids[batch_index], + encoding.sequence_ids(batch_index), + encoding.word_ids(batch_index), + ): + if s == 1: + bbox.append(boxes[w]) + elif i == self.tokenizer.sep_token_id: + bbox.append([1000] * 4) + else: + bbox.append([0] * 4) + + if self.framework == "tf": + raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + elif self.framework == "pt": + encoding["bbox"] = torch.tensor([bbox]) + + word_ids = [encoding.word_ids(i) for i in range(num_spans)] return { **encoding, @@ -329,15 +354,44 @@ def _forward(self, model_inputs): word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) - model_outputs = self.model(**model_inputs) + if isinstance(self.model, VisionEncoderDecoderModel): + model_outputs = self.model.generate(**model_inputs) + else: + model_outputs = self.model(**model_inputs) model_outputs["p_mask"] = p_mask model_outputs["word_ids"] = word_ids model_outputs["words"] = words - model_outputs["attention_mask"] = model_inputs["attention_mask"] + model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) return model_outputs - def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15): + def postprocess(self, model_outputs, top_k=1, **kwargs): + if isinstance(self.model, VisionEncoderDecoderModel): + answers = self.postprocess_encoder_decoder(model_outputs) + else: + answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) + + answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] + if len(answers) == 1: + return answers[0] + return answers + + def postprocess_encoder_decoder(self, model_outputs, **kwargs): + # postprocess + sequence = self.tokenizer.batch_decode(model_outputs.sequences)[0] + sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") + sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token + ret = { + "score": 0.5, # TODO + "answer": "", + } + + answer = re.search(r'(.*)', sequence) + if answer is not None: + ret['answer'] = answer.group(1).strip() + return [ret] + + def postprocess_extractive_qa(self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs): min_null_score = 1000000 # large and positive answers = [] words = model_outputs["words"] @@ -371,8 +425,3 @@ def postprocess(self, model_outputs, top_k=1, handle_impossible_answer=False, ma if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) - - answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] - if len(answers) == 1: - return answers[0] - return answers From 8e5fe30032ef6a54856f5c08027fb4a8a3f631fe Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Sat, 13 Aug 2022 12:36:42 -0700 Subject: [PATCH 19/34] Fixup --- src/transformers/models/auto/modeling_auto.py | 5 +++-- .../pipelines/document_question_answering.py | 22 ++++++++++--------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 98354b6eeb0959..019ccefc694a74 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -609,10 +609,11 @@ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ - ("layoutlm", "LayoutLMForQuestionAnswering"), + # TODO 
Uncomment after # 18407 lands + # ("layoutlm", "LayoutLMForQuestionAnswering"), + ("donut-swin", "DonutSwinModel"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), - ("donut-swin", "DonutSwinModel"), ] ) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 17fac3181c0201..9327b295a38980 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -1,12 +1,12 @@ -from typing import List, Optional, Tuple, Union import re +from typing import List, Optional, Tuple, Union import numpy as np +from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline from .question_answering import select_starts_ends -from ..models.vision_encoder_decoder import VisionEncoderDecoderModel if is_vision_available(): @@ -234,7 +234,7 @@ def preprocess( if self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) elif isinstance(self.model, VisionEncoderDecoderModel): - raise ValueError("If you are using a VisionEncoder model, you must provide a feature extractor") + raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None if not isinstance(self.model, VisionEncoderDecoderModel): @@ -247,15 +247,15 @@ def preprocess( elif image is not None: if not TESSERACT_LOADED: raise ValueError( - "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract, but" - " pytesseract is not available" + "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," + " but pytesseract is not available" ) if TESSERACT_LOADED: words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) else: raise ValueError( - "You must provide an image or word_boxes. If you provide an image, the pipeline will automatically run" - " OCR to derive words and boxes" + "You must provide an image or word_boxes. 
If you provide an image, the pipeline will automatically" + " run OCR to derive words and boxes" ) if self.tokenizer.padding_side != "right": @@ -386,12 +386,14 @@ def postprocess_encoder_decoder(self, model_outputs, **kwargs): "answer": "", } - answer = re.search(r'(.*)', sequence) + answer = re.search(r"(.*)", sequence) if answer is not None: - ret['answer'] = answer.group(1).strip() + ret["answer"] = answer.group(1).strip() return [ret] - def postprocess_extractive_qa(self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs): + def postprocess_extractive_qa( + self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs + ): min_null_score = 1000000 # large and positive answers = [] words = model_outputs["words"] From c60533e1c7fb307fe2a97cd9d04f98f20f3f9942 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 22 Aug 2022 23:25:44 -0700 Subject: [PATCH 20/34] Incorporate comments --- .../pipelines/document_question_answering.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 9327b295a38980..fe21e74fe0496e 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -3,7 +3,6 @@ import numpy as np -from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline from .question_answering import select_starts_ends @@ -90,6 +89,9 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) + # NOTE: As Donut evolves and other similar models emerge, we should generalize this + self.is_vision_encoder_decoder = self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig" + def _sanitize_parameters( self, padding=None, @@ -211,7 +213,6 @@ def preprocess( doc_stride=None, max_question_len=64, max_seq_len=None, - word_boxes: Tuple[str, List[float]] = None, lang=None, tesseract_config="", ): @@ -233,11 +234,11 @@ def preprocess( image = load_image(input["image"]) if self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) - elif isinstance(self.model, VisionEncoderDecoderModel): + elif self.is_vision_encoder_decoder: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None - if not isinstance(self.model, VisionEncoderDecoderModel): + if not self.is_vision_encoder_decoder: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] @@ -264,7 +265,7 @@ def preprocess( f" {self.tokenizer.padding_side}" ) - if isinstance(self.model, VisionEncoderDecoderModel): + if self.is_vision_encoder_decoder: task_prompt = f'{input["question"]}' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { @@ -354,7 +355,7 @@ def _forward(self, model_inputs): word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) - if isinstance(self.model, VisionEncoderDecoderModel): + if self.is_vision_encoder_decoder: model_outputs = self.model.generate(**model_inputs) else: model_outputs = self.model(**model_inputs) @@ 
-366,7 +367,7 @@ def _forward(self, model_inputs): return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): - if isinstance(self.model, VisionEncoderDecoderModel): + if self.is_vision_encoder_decoder: answers = self.postprocess_encoder_decoder(model_outputs) else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) @@ -427,3 +428,5 @@ def postprocess_extractive_qa( if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) + + return answers From d45dfe74ecd9102e22b861e67cb5e531dd39009d Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Tue, 23 Aug 2022 22:04:10 -0700 Subject: [PATCH 21/34] Address comments --- src/transformers/models/auto/modeling_auto.py | 3 +-- src/transformers/pipelines/__init__.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 019ccefc694a74..9781a13356331b 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -611,7 +611,6 @@ [ # TODO Uncomment after # 18407 lands # ("layoutlm", "LayoutLMForQuestionAnswering"), - ("donut-swin", "DonutSwinModel"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ] @@ -915,7 +914,7 @@ class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): AutoModelForDocumentQuestionAnswering = auto_class_update( AutoModelForDocumentQuestionAnswering, head_doc="document question answering", - checkpoint_for_example="impira/layoutlm-doc-qa", + checkpoint_for_example="impira/layoutlm-document-qa", ) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 50c8d3f49d3e48..f431bae5df793e 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -222,7 +222,7 @@ "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { - "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")}, # TODO + "model": {"pt": ("impira/layoutlm-document-qa", "02daaaf")}, # TODO Update after # 18407 lands }, "type": "multimodal", }, From a8d260b529a4090d311668612626f0e123b2461a Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Tue, 23 Aug 2022 23:43:46 -0700 Subject: [PATCH 22/34] Initial incorporation of tests --- src/transformers/__init__.py | 2 + src/transformers/models/auto/__init__.py | 2 + .../pipelines/document_question_answering.py | 91 ++++++--- src/transformers/utils/dummy_pt_objects.py | 3 + ...t_pipelines_document_question_answering.py | 184 ++++++++++++++++++ 5 files changed, 250 insertions(+), 32 deletions(-) create mode 100644 tests/pipelines/test_pipelines_document_question_answering.py diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e190ae960ae221..70247da9ab5b1d 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -790,6 +790,7 @@ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", "MODEL_FOR_CAUSAL_LM_MAPPING", "MODEL_FOR_CTC_MAPPING", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", @@ -3552,6 +3553,7 @@ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, 
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 697252464aa3c6..8189e0d809406e 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -47,6 +47,7 @@ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", "MODEL_FOR_CAUSAL_LM_MAPPING", "MODEL_FOR_CTC_MAPPING", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", @@ -192,6 +193,7 @@ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index fe21e74fe0496e..a17f32e9d3faf0 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -3,7 +3,14 @@ import numpy as np -from ..utils import add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging +from ..utils import ( + ExplicitEnum, + add_end_docstrings, + is_pytesseract_available, + is_torch_available, + is_vision_available, + logging, +) from .base import PIPELINE_INIT_ARGS, Pipeline from .question_answering import select_starts_ends @@ -70,6 +77,12 @@ def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: return words, normalized_boxes +class ModelType(ExplicitEnum): + LayoutLM = "layoutlm" + LayoutLMv2Plus = "layoutlmv2+" + Donut = "donut" + + @add_end_docstrings(PIPELINE_INIT_ARGS) class DocumentQuestionAnsweringPipeline(Pipeline): # TODO: Update task_summary docs to include an example with document QA and then update the first sentence @@ -89,8 +102,12 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) - # NOTE: As Donut evolves and other similar models emerge, we should generalize this - self.is_vision_encoder_decoder = self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig" + if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": + self.model_type = ModelType.Donut + elif self.model.config.__class__.__name__ == "LayoutLMConfig": + self.model_type = ModelType.LayoutLM + else: + self.model_type = ModelType.LayoutLMv2Plus def _sanitize_parameters( self, @@ -234,17 +251,17 @@ def preprocess( image = load_image(input["image"]) if self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) - elif self.is_vision_encoder_decoder: + elif self.model_type == ModelType.Donut: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None - if not self.is_vision_encoder_decoder: + if not self.model_type == ModelType.Donut: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] elif "words" in image_features and "boxes" in image_features: - words = image_features.pop("words") - boxes = image_features.pop("boxes") + words = image_features.pop("words")[0] + boxes = image_features.pop("boxes")[0] elif image is not None: if not TESSERACT_LOADED: raise ValueError( @@ -265,7 
+282,7 @@ def preprocess( f" {self.tokenizer.padding_side}" ) - if self.is_vision_encoder_decoder: + if self.model_type == ModelType.Donut: task_prompt = f'{input["question"]}' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { @@ -286,23 +303,32 @@ def preprocess( word_ids = None words = None else: + tokenizer_kwargs = {} + if self.model_type == ModelType.LayoutLM: + tokenizer_kwargs["text"] = input["question"].split() + tokenizer_kwargs["text_pair"] = words + tokenizer_kwargs["is_split_into_words"] = True + else: + tokenizer_kwargs["text"] = [input["question"]] + tokenizer_kwargs["text_pair"] = [words] + tokenizer_kwargs["boxes"] = [boxes] + encoding = self.tokenizer( - text=input["question"].split(), - text_pair=words, padding=padding, max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, - is_split_into_words=True, return_tensors=self.framework, # TODO: In a future PR, use these feature to handle sequences whose length is longer than # the maximum allowed by the model. Currently, the tokenizer will produce a sequence that # may be too long for the model to handle. # truncation="only_second", # return_overflowing_tokens=True, + **tokenizer_kwargs, ) - encoding.update(image_features) + if "pixel_values" in image_features: + encoding["image"] = image_features.pop("pixel_values") # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above, but the # code is written to naturally handle multiple spans at the right time. @@ -322,24 +348,25 @@ def preprocess( # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] # for SEP tokens, and the word's bounding box for words in the original document. - bbox = [] - for batch_index in range(num_spans): - for i, s, w in zip( - encoding.input_ids[batch_index], - encoding.sequence_ids(batch_index), - encoding.word_ids(batch_index), - ): - if s == 1: - bbox.append(boxes[w]) - elif i == self.tokenizer.sep_token_id: - bbox.append([1000] * 4) - else: - bbox.append([0] * 4) - - if self.framework == "tf": - raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") - elif self.framework == "pt": - encoding["bbox"] = torch.tensor([bbox]) + if "boxes" not in tokenizer_kwargs: + bbox = [] + for batch_index in range(num_spans): + for i, s, w in zip( + encoding.input_ids[batch_index], + encoding.sequence_ids(batch_index), + encoding.word_ids(batch_index), + ): + if s == 1: + bbox.append(boxes[w]) + elif i == self.tokenizer.sep_token_id: + bbox.append([1000] * 4) + else: + bbox.append([0] * 4) + + if self.framework == "tf": + raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + elif self.framework == "pt": + encoding["bbox"] = torch.tensor([bbox]) word_ids = [encoding.word_ids(i) for i in range(num_spans)] @@ -355,7 +382,7 @@ def _forward(self, model_inputs): word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) - if self.is_vision_encoder_decoder: + if self.model_type == ModelType.Donut: model_outputs = self.model.generate(**model_inputs) else: model_outputs = self.model(**model_inputs) @@ -367,7 +394,7 @@ def _forward(self, model_inputs): return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): - if self.is_vision_encoder_decoder: + if self.model_type == ModelType.Donut: answers = self.postprocess_encoder_decoder(model_outputs) else: answers = self.postprocess_extractive_qa(model_outputs, 
top_k=top_k, **kwargs) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c0e68ab3551720..dbdf37da4c7161 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -358,6 +358,9 @@ def load_tf_weights_in_albert(*args, **kwargs): MODEL_FOR_CTC_MAPPING = None +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py new file mode 100644 index 00000000000000..8bdca0637be00a --- /dev/null +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -0,0 +1,184 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import unittest + +from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available +from transformers.pipelines import pipeline +from transformers.testing_utils import ( + is_pipeline_test, + nested_simplify, + require_detectron2, + require_pytesseract, + require_tf, + require_torch, + require_vision, + slow, +) + +from .test_pipelines_common import ANY, PipelineTestCaseMeta + + +if is_vision_available(): + from PIL import Image +else: + + class Image: + @staticmethod + def open(*args, **kwargs): + pass + + +@is_pipeline_test +@require_torch +@require_vision +class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): + model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + def get_test_pipeline(self, model, tokenizer, feature_extractor): + dqa_pipeline = pipeline( + "document-question-answering", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor + ) + examples = [ + { + "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), + "question": "How many cats are there?", + }, + { + "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", + "question": "How many cats are there?", + }, + { + "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", + "question": "How many cats are there?", + "word_boxes": json.load(open("./tests/fixtures/tests_samples/COCO/000000039769.json", "r")), + }, + ] + return dqa_pipeline, examples + + def run_pipeline_test(self, dqa_pipeline, examples): + outputs = dqa_pipeline(examples, top_k=2) + self.assertEqual( + outputs, + [ + [{"score": ANY(float), "answer": ANY(str)}], + [{"score": ANY(float), "answer": ANY(str)}], + ], + ) + + # TODO: Add layoutlmv1 once PR #18407 lands + + @require_torch + @require_detectron2 + @require_pytesseract + def test_small_model_pt_layoutlmv2(self): + dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2") + image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + question = "How many cats are there?" 
+ + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual(outputs, [{"score": 2.0, "answer": "te"}, {"score": 2.0, "answer": "te"}]) + + outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) + self.assertEqual(outputs, [{"score": 2.0, "answer": "te"}, {"score": 2.0, "answer": "te"}]) + + # No text is detected in this image, meaning layoutlmv2 should fail and + # probably return an empty answer. + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual(outputs, []) + + # We can optionally pass the words and bounding boxes directly + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + words = [] + boxes = [] + outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) + self.assertEqual(outputs, []) + + def test_small_model_pt_donut(self): + dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") + # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") + image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + question = "How many cats are there?" + + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] + ) + + @slow + @require_torch + @require_detectron2 + @require_pytesseract + def test_large_model_pt_layoutlmv2(self): + dqa_pipeline = pipeline( + "document-question-answering", + model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", + revision="9977165", + ) + image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + question = "What is the invoice number?" + + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + ], + ) + + outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + ], + ) + + outputs = dqa_pipeline( + [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + ], + ] + * 2, + ) + + @slow + @require_torch + def test_large_model_pt_donut(self): + dqa_pipeline = pipeline( + "document-question-answering", + model="naver-clova-ix/donut-base-finetuned-docvqa", + tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), + feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", + ) + + image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + question = "What is the invoice number?"
+ outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual(nested_simplify(outputs, decimals=4), {"score": 0.5, "answer": "us-001"}) + + @require_tf + @unittest.skip("Document question answering not implemented in TF") + def test_small_model_tf(self): + pass From aeff3b2f594dcf44e49af701a6c69a8e5b575915 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 24 Aug 2022 08:24:52 -0700 Subject: [PATCH 23/34] Address Comments --- src/transformers/models/auto/modeling_auto.py | 6 ------ .../pipelines/document_question_answering.py | 11 ++++++----- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 9781a13356331b..dbe518ff3283a9 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -541,10 +541,6 @@ ] ) -# TODO: Do the LayoutLM classes belong here, or should they only be in VisualQuestionAnsweringPipeline? -# Because the bounding boxes are optional inputs to the model, you technically _can_ execute these models -# through the question answering pipeline (because its output shape matches), but I'm not sure if models -# should belong in multiple mappings. MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ # Model for Question Answering mapping @@ -566,8 +562,6 @@ ("funnel", "FunnelForQuestionAnswering"), ("gptj", "GPTJForQuestionAnswering"), ("ibert", "IBertForQuestionAnswering"), - ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), - ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ("led", "LEDForQuestionAnswering"), ("longformer", "LongformerForQuestionAnswering"), ("luke", "LukeForQuestionAnswering"), diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index a17f32e9d3faf0..6f2cf3db8a3d34 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -79,7 +79,7 @@ def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: class ModelType(ExplicitEnum): LayoutLM = "layoutlm" - LayoutLMv2Plus = "layoutlmv2+" + LayoutLMv2Plus = "layoutlmv2+" # Refers to LayoutLMv2 and LayoutLMv3 Donut = "donut" @@ -159,7 +159,8 @@ def __call__( """ Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not - provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically. + provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for + LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: @@ -182,9 +183,9 @@ def __call__( A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the - pipeline will use these words and boxes instead of running OCR on the image to derive them. This allows - you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each - time. + pipeline will use these words and boxes instead of running OCR on the image to derive them for models + that need them (e.g. LayoutLM). 
This allows you to reuse OCR'd results across many invocations of the + pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. From a9e70c8e8029824a133f1e6a88bd94b84f4646c8 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 24 Aug 2022 13:20:48 -0700 Subject: [PATCH 24/34] Change assert to ValueError --- src/transformers/pipelines/document_question_answering.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 6f2cf3db8a3d34..f7a182555a5634 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -72,7 +72,8 @@ def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) - assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes" + if len(words) != len(normalized_boxes): + raise ValueError("Not as many words as there are bounding boxes") return words, normalized_boxes From f654983d0f4d1df4a04c19df7a8f380f4b50c46c Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Thu, 25 Aug 2022 23:00:52 -0700 Subject: [PATCH 25/34] Comments --- .../pipelines/document_question_answering.py | 4 ++-- .../test_pipelines_document_question_answering.py | 13 +++++++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index f7a182555a5634..97cf26f21bd028 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -80,7 +80,7 @@ def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: class ModelType(ExplicitEnum): LayoutLM = "layoutlm" - LayoutLMv2Plus = "layoutlmv2+" # Refers to LayoutLMv2 and LayoutLMv3 + LayoutLMv2andv3 = "layoutlmv2andv3" Donut = "donut" @@ -108,7 +108,7 @@ def __init__(self, *args, **kwargs): elif self.model.config.__class__.__name__ == "LayoutLMConfig": self.model_type = ModelType.LayoutLM else: - self.model_type = ModelType.LayoutLMv2Plus + self.model_type = ModelType.LayoutLMv2andv3 def _sanitize_parameters( self, diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py index 8bdca0637be00a..e30b9fe346bb2a 100644 --- a/tests/pipelines/test_pipelines_document_question_answering.py +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -88,11 +88,20 @@ def test_small_model_pt_layoutlmv2(self): image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" question = "How many cats are there?" 
+ expected_output = [ + { + "score": 0.0001, + "answer": "2312/2019 DUE DATE 26102/2019 ay DESCRIPTION UNIT PRICE", + "start": 38, + "end": 45, + }, + {"score": 0.0001, "answer": "2312/2019 DUE", "start": 38, "end": 39}, + ] outputs = dqa_pipeline(image=image, question=question, top_k=2) - self.assertEqual(outputs, [{"score": 2.0, "answer": "te"}, {"score": 2.0, "answer": "te"}]) + self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) - self.assertEqual(outputs, [{"score": 2.0, "answer": "te"}, {"score": 2.0, "answer": "te"}]) + self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. # Empty answer probably From 23f66007844da971af4a2dd8d2b7482a299b98a3 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Tue, 30 Aug 2022 06:49:21 -0700 Subject: [PATCH 26/34] Wrap `score` in float to make it JSON serializable --- src/transformers/pipelines/document_question_answering.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 97cf26f21bd028..05ef0c766f47a0 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -448,7 +448,7 @@ def postprocess_extractive_qa( if word_start is not None and word_end is not None: answers.append( { - "score": score, + "score": float(score), # XXX Write a test that verifies the result is JSON-serializable "answer": " ".join(words[word_start : word_end + 1]), "start": word_start, "end": word_end, From 0168f3acd1e4bcfb2f4b8ebd1dce917bc6e7cfcc Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 31 Aug 2022 22:40:37 -0700 Subject: [PATCH 27/34] Incorporate AutoModeLForDocumentQuestionAnswering changes --- src/transformers/__init__.py | 6 +++++ src/transformers/models/auto/__init__.py | 2 ++ src/transformers/models/auto/modeling_auto.py | 7 +++--- .../models/auto/modeling_tf_auto.py | 21 ++++++++++++++++ src/transformers/pipelines/__init__.py | 2 +- src/transformers/utils/fx.py | 5 ++-- .../models/layoutlm/test_modeling_layoutlm.py | 24 ------------------- .../layoutlm/test_modeling_tf_layoutlm.py | 18 -------------- tests/test_modeling_common.py | 17 +++++++++---- tests/test_modeling_tf_common.py | 12 ++++++---- 10 files changed, 56 insertions(+), 58 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 70247da9ab5b1d..55b5c061269c53 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2110,6 +2110,7 @@ "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", + "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", @@ -2127,6 +2128,7 @@ "TFAutoModelForMultipleChoice", "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", + "TFAutoModelForDocumentQuestionAnswering", "TFAutoModelForQuestionAnswering", "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", @@ -4620,6 +4622,7 @@ TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, + TFLayoutLMForDocumentQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, 
TFLayoutLMMainLayer, @@ -4635,6 +4638,7 @@ TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, + TFAlbertForDocumentQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, @@ -4649,6 +4653,7 @@ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, @@ -4667,6 +4672,7 @@ TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, + TFAutoModelForDocumentQuestionAnswering, TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 8189e0d809406e..db894c61d32081 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -113,6 +113,7 @@ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", @@ -259,6 +260,7 @@ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index dbe518ff3283a9..e89d93f0c2743f 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -562,6 +562,8 @@ ("funnel", "FunnelForQuestionAnswering"), ("gptj", "GPTJForQuestionAnswering"), ("ibert", "IBertForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ("led", "LEDForQuestionAnswering"), ("longformer", "LongformerForQuestionAnswering"), ("luke", "LukeForQuestionAnswering"), @@ -603,10 +605,7 @@ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ - # TODO Uncomment after # 18407 lands - # ("layoutlm", "LayoutLMForQuestionAnswering"), - ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), - ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ("layoutlm", "LayoutLMForQuestionAnswering"), ] ) diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index a12f6accdcaeee..46c85022ecad35 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -315,6 +315,13 @@ ] ) +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "TFLayoutLMForQuestionAnswering"), + ] +) + + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ # Model for Table Question Answering mapping @@ -406,6 +413,9 @@ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) 
TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES ) @@ -515,6 +525,17 @@ class TFAutoModelForQuestionAnswering(_BaseAutoModelClass): TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering") +class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForDocumentQuestionAnswering = auto_class_update( + TFAutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example="impira/layoutlm-document-qa", +) + + class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index f431bae5df793e..87b6dbe3733e94 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -222,7 +222,7 @@ "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { - "model": {"pt": ("impira/layoutlm-document-qa", "02daaaf")}, # TODO Update after # 18407 lands + "model": {"pt": ("impira/layoutlm-document-qa", "3a93017")}, # TODO Update with custom pipeline removed, just before we land }, "type": "multimodal", }, diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index aec3c950ae435a..e21bbfa462d157 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -43,6 +43,7 @@ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, @@ -71,6 +72,7 @@ def _generate_supported_model_class_names( "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, + "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, @@ -147,7 +149,6 @@ def _generate_supported_model_class_names( "GPT2DoubleHeadsModel", "Speech2Text2Decoder", "TrOCRDecoder", - "LayoutLMForQuestionAnswering", # TODO: add support for them as it should be quite easy to do so (small blocking issues). 
# XLNetForQuestionAnswering, ] @@ -691,7 +692,7 @@ def _generate_dummy_input( inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), - "LayoutLMForQuestionAnswering", + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), "XLNetForQuestionAnswering", ]: inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index cce3c9b3f48615..f0e01d16eadb21 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -273,30 +273,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): - inputs_dict = copy.deepcopy(inputs_dict) - if return_labels: - if model_class in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): - inputs_dict["labels"] = torch.zeros( - self.model_tester.batch_size, dtype=torch.long, device=torch_device - ) - elif model_class in [ - *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), - *get_values(MODEL_FOR_MASKED_LM_MAPPING), - ]: - inputs_dict["labels"] = torch.zeros( - (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device - ) - elif model_class.__name__ == "LayoutLMForQuestionAnswering": - inputs_dict["start_positions"] = torch.zeros( - self.model_tester.batch_size, dtype=torch.long, device=torch_device - ) - inputs_dict["end_positions"] = torch.zeros( - self.model_tester.batch_size, dtype=torch.long, device=torch_device - ) - - return inputs_dict - def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: diff --git a/tests/models/layoutlm/test_modeling_tf_layoutlm.py b/tests/models/layoutlm/test_modeling_tf_layoutlm.py index 9323b0bb9b97d2..2b6b80e698f927 100644 --- a/tests/models/layoutlm/test_modeling_tf_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_tf_layoutlm.py @@ -263,24 +263,6 @@ def test_model_from_pretrained(self): model = TFLayoutLMModel.from_pretrained(model_name) self.assertIsNotNone(model) - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): - inputs_dict = copy.deepcopy(inputs_dict) - if return_labels: - if model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): - inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) - elif model_class in [ - *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), - *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), - ]: - inputs_dict["labels"] = tf.zeros( - (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 - ) - elif model_class.__name__ == "TFLayoutLMForQuestionAnswering": - inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) - inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) - - return inputs_dict - def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 05921334a6b8bb..09644944261fe5 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -95,6 +95,7 @@ 
MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, @@ -172,7 +173,10 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) - elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): + elif model_class in [ + *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING), + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), + ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) @@ -201,7 +205,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): elif model_class in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( - (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device + (self.model_tester.batch_size, num_patches ** 2), dtype=torch.long, device=torch_device ) elif model_class in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape @@ -542,7 +546,10 @@ def test_attention_outputs(self): if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits - if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): + if model_class in [ + *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING), + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), + ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned @@ -2785,9 +2792,9 @@ def test_checkpoint_sharding_local(self): # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): - max_size_int = int(max_size[:-3]) * 2**10 + max_size_int = int(max_size[:-3]) * 2 ** 10 else: - max_size_int = int(max_size[:-2]) * 10**3 + max_size_int = int(max_size[:-2]) * 10 ** 3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index f3608f4b225d86..e2845c03cdcdd9 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -67,6 +67,7 @@ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, @@ -149,7 +150,10 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) - elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): + elif model_class 
in [ + *get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING), + *get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), + ]: inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ @@ -173,7 +177,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = tf.zeros( - (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 + (self.model_tester.batch_size, num_patches ** 2), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape @@ -2057,9 +2061,9 @@ def test_checkpoint_sharding_local(self): # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): - max_size_int = int(max_size[:-3]) * 2**10 + max_size_int = int(max_size[:-3]) * 2 ** 10 else: - max_size_int = int(max_size[:-2]) * 10**3 + max_size_int = int(max_size[:-2]) * 10 ** 3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: From 92f641b405e03fa4b2a0b04f785c1a4f9204e246 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 31 Aug 2022 22:50:31 -0700 Subject: [PATCH 28/34] Fixup --- docs/source/en/model_doc/auto.mdx | 4 ++++ src/transformers/__init__.py | 6 ++---- src/transformers/models/auto/__init__.py | 2 +- src/transformers/pipelines/__init__.py | 4 +++- .../pipelines/document_question_answering.py | 1 - src/transformers/utils/dummy_tf_objects.py | 10 ++++++++++ src/transformers/utils/fx.py | 2 +- tests/models/layoutlm/test_modeling_layoutlm.py | 6 ------ tests/models/layoutlm/test_modeling_tf_layoutlm.py | 7 ------- tests/test_modeling_common.py | 8 ++++---- tests/test_modeling_tf_common.py | 8 ++++---- 11 files changed, 29 insertions(+), 29 deletions(-) diff --git a/docs/source/en/model_doc/auto.mdx b/docs/source/en/model_doc/auto.mdx index af73c13d4660e5..93976424ba8edd 100644 --- a/docs/source/en/model_doc/auto.mdx +++ b/docs/source/en/model_doc/auto.mdx @@ -218,6 +218,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] TFAutoModelForTableQuestionAnswering +## TFAutoModelForDocumentQuestionAnswering + +[[autodoc]] TFAutoModelForDocumentQuestionAnswering + ## TFAutoModelForTokenClassification [[autodoc]] TFAutoModelForTokenClassification diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 55b5c061269c53..e10e2ce0ba0a41 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -4622,7 +4622,6 @@ TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, - TFLayoutLMForDocumentQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMMainLayer, @@ -4638,7 +4637,6 @@ TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, - TFAlbertForDocumentQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, @@ -4647,13 +4645,13 @@ ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, + 
TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, - TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, @@ -4666,13 +4664,13 @@ TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, + TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, - TFAutoModelForDocumentQuestionAnswering, TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index db894c61d32081..3b0b1f34752b08 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -253,6 +253,7 @@ else: from .modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, @@ -260,7 +261,6 @@ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, - TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 87b6dbe3733e94..ca8102d57695fa 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -222,7 +222,9 @@ "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { - "model": {"pt": ("impira/layoutlm-document-qa", "3a93017")}, # TODO Update with custom pipeline removed, just before we land + "model": { + "pt": ("impira/layoutlm-document-qa", "3a93017") + }, # TODO Update with custom pipeline removed, just before we land }, "type": "multimodal", }, diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 05ef0c766f47a0..8075008fd36219 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -412,7 +412,6 @@ def postprocess_encoder_decoder(self, model_outputs, **kwargs): sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token ret = { - "score": 0.5, # TODO "answer": "", } diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index bc3eb64ca46dab..69e11eeb31d605 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -265,6 +265,9 @@ def __init__(self, *args, **kwargs): TF_MODEL_FOR_CAUSAL_LM_MAPPING = None +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None @@ -327,6 +330,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) 
+class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + class TFAutoModelForImageClassification(metaclass=DummyObject): _backends = ["tf"] diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index e21bbfa462d157..c08f6766c9dfc4 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -36,6 +36,7 @@ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, @@ -43,7 +44,6 @@ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, - MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index f0e01d16eadb21..16cacab88c8614 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -12,12 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import copy import unittest from transformers import LayoutLMConfig, is_torch_available -from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester @@ -28,9 +25,6 @@ import torch from transformers import ( - MODEL_FOR_MASKED_LM_MAPPING, - MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, - MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, diff --git a/tests/models/layoutlm/test_modeling_tf_layoutlm.py b/tests/models/layoutlm/test_modeling_tf_layoutlm.py index 2b6b80e698f927..4224f20a1da76d 100644 --- a/tests/models/layoutlm/test_modeling_tf_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_tf_layoutlm.py @@ -13,13 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available -from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester @@ -29,11 +27,6 @@ if is_tf_available(): import tensorflow as tf - from transformers import ( - TF_MODEL_FOR_MASKED_LM_MAPPING, - TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, - TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, - ) from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 09644944261fe5..6c4814c1a87274 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -89,13 +89,13 @@ MODEL_FOR_AUDIO_XVECTOR_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, - MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, @@ -205,7 +205,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): elif model_class in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( - (self.model_tester.batch_size, num_patches ** 2), dtype=torch.long, device=torch_device + (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape @@ -2792,9 +2792,9 @@ def test_checkpoint_sharding_local(self): # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): - max_size_int = int(max_size[:-3]) * 2 ** 10 + max_size_int = int(max_size[:-3]) * 2**10 else: - max_size_int = int(max_size[:-2]) * 10 ** 3 + max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index e2845c03cdcdd9..0ef457c03523eb 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -61,13 +61,13 @@ from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, - TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, @@ -177,7 +177,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // 
self.model_tester.patch_size inputs_dict["bool_masked_pos"] = tf.zeros( - (self.model_tester.batch_size, num_patches ** 2), dtype=tf.int32 + (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape @@ -2061,9 +2061,9 @@ def test_checkpoint_sharding_local(self): # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): - max_size_int = int(max_size[:-3]) * 2 ** 10 + max_size_int = int(max_size[:-3]) * 2**10 else: - max_size_int = int(max_size[:-2]) * 10 ** 3 + max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: From 2fe7a8cad6a5a0db0c3b3230c1d04513d0e4b353 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 31 Aug 2022 22:54:04 -0700 Subject: [PATCH 29/34] Rename postprocess function --- src/transformers/pipelines/document_question_answering.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 8075008fd36219..3d7f9b7a46df36 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -397,7 +397,7 @@ def _forward(self, model_inputs): def postprocess(self, model_outputs, top_k=1, **kwargs): if self.model_type == ModelType.Donut: - answers = self.postprocess_encoder_decoder(model_outputs) + answers = self.postprocess_donut(model_outputs) else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) @@ -406,7 +406,7 @@ def postprocess(self, model_outputs, top_k=1, **kwargs): return answers[0] return answers - def postprocess_encoder_decoder(self, model_outputs, **kwargs): + def postprocess_donut(self, model_outputs, **kwargs): # postprocess sequence = self.tokenizer.batch_decode(model_outputs.sequences)[0] sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") From a59fbd316a4cab77195ce96177e47d9f189cfdf8 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 31 Aug 2022 23:06:30 -0700 Subject: [PATCH 30/34] Fix auto import --- src/transformers/models/auto/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 3b0b1f34752b08..6129253f14711b 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -130,6 +130,7 @@ "TFAutoModelForMultipleChoice", "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", + "TFAutoModelForDocumentQuestionAnswering", "TFAutoModelForQuestionAnswering", "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", @@ -272,6 +273,7 @@ TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, + TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, From 6c94556c520f084313bff4578b34a6b6fea0f738 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 5 Sep 2022 14:23:39 -0700 Subject: [PATCH 31/34] Applying comments --- src/transformers/models/auto/modeling_auto.py | 4 +- .../pipelines/document_question_answering.py | 46 +- 
.../tests_samples/DocVQA/yrvw0217_50.json | 1226 +++++++++++++++++ .../tests_samples/DocVQA/yrvw0217_50.png | Bin 0 -> 96744 bytes ...t_pipelines_document_question_answering.py | 131 +- 5 files changed, 1345 insertions(+), 62 deletions(-) create mode 100644 tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json create mode 100644 tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index e89d93f0c2743f..545bc348bdb1ca 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -606,6 +606,8 @@ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ ("layoutlm", "LayoutLMForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ] ) @@ -907,7 +909,7 @@ class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): AutoModelForDocumentQuestionAnswering = auto_class_update( AutoModelForDocumentQuestionAnswering, head_doc="document question answering", - checkpoint_for_example="impira/layoutlm-document-qa", + checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', # XXX verify docs ) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 3d7f9b7a46df36..ce56fb38c890fb 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -81,7 +81,7 @@ def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: class ModelType(ExplicitEnum): LayoutLM = "layoutlm" LayoutLMv2andv3 = "layoutlmv2andv3" - Donut = "donut" + VisionEncoderDecoder = "vision_encoder_decoder" @add_end_docstrings(PIPELINE_INIT_ARGS) @@ -104,7 +104,7 @@ def __init__(self, *args, **kwargs): self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": - self.model_type = ModelType.Donut + self.model_type = ModelType.VisionEncoderDecoder elif self.model.config.__class__.__name__ == "LayoutLMConfig": self.model_type = ModelType.LayoutLM else: @@ -228,36 +228,20 @@ def __call__( def preprocess( self, input, - padding="do_not_pad", - doc_stride=None, - max_question_len=64, - max_seq_len=None, lang=None, tesseract_config="", ): - # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR - # to support documents with enough tokens that overflow the model's window - # if max_seq_len is None: - # # TODO: LayoutLM's stride is 512 by default. Is it ok to use that as the min - # # instead of 384 (which the QA model uses)? 
- # max_seq_len = min(self.tokenizer.model_max_length, 512) - - if doc_stride is not None: - # TODO implement - # doc_stride = min(max_seq_len // 2, 128) - raise ValueError("Unsupported: striding inputs") - image = None image_features = {} - if "image" in input: + if input.get("image", None) is not None: image = load_image(input["image"]) if self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) - elif self.model_type == ModelType.Donut: + elif self.model_type == ModelType.VisionEncoderDecoder: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None - if not self.model_type == ModelType.Donut: + if not self.model_type == ModelType.VisionEncoderDecoder: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] @@ -284,7 +268,7 @@ def preprocess( f" {self.tokenizer.padding_side}" ) - if self.model_type == ModelType.Donut: + if self.model_type == ModelType.VisionEncoderDecoder: task_prompt = f'{input["question"]}' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { @@ -292,13 +276,6 @@ def preprocess( "decoder_input_ids": self.tokenizer( task_prompt, add_special_tokens=False, return_tensors=self.framework ).input_ids, - "max_length": self.model.decoder.config.max_position_embeddings, - "early_stopping": True, - "pad_token_id": self.tokenizer.pad_token_id, - "eos_token_id": self.tokenizer.eos_token_id, - "use_cache": True, - "num_beams": 1, - "bad_words_ids": [[self.tokenizer.unk_token_id]], "return_dict_in_generate": True, } p_mask = None @@ -316,9 +293,6 @@ def preprocess( tokenizer_kwargs["boxes"] = [boxes] encoding = self.tokenizer( - padding=padding, - max_length=max_seq_len, - stride=doc_stride, return_token_type_ids=True, return_tensors=self.framework, # TODO: In a future PR, use these feature to handle sequences whose length is longer than @@ -384,7 +358,7 @@ def _forward(self, model_inputs): word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) - if self.model_type == ModelType.Donut: + if self.model_type == ModelType.VisionEncoderDecoder: model_outputs = self.model.generate(**model_inputs) else: model_outputs = self.model(**model_inputs) @@ -396,12 +370,12 @@ def _forward(self, model_inputs): return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): - if self.model_type == ModelType.Donut: + if self.model_type == ModelType.VisionEncoderDecoder: answers = self.postprocess_donut(model_outputs) else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) - answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] + answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] if len(answers) == 1: return answers[0] return answers @@ -412,7 +386,7 @@ def postprocess_donut(self, model_outputs, **kwargs): sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token ret = { - "answer": "", + "answer": None, } answer = re.search(r"(.*)", sequence) diff --git a/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json b/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json new file mode 100644 index 00000000000000..03ca09d6f1ff78 --- /dev/null +++ b/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json @@ -0,0 +1,1226 @@ +[ + [ 
+ "but", + [ + 121, + 89, + 147, + 101 + ] + ], + [ + "no", + [ + 153, + 92, + 174, + 101 + ] + ], + [ + "bleeding).", + [ + 180, + 89, + 264, + 103 + ] + ], + [ + "Each", + [ + 277, + 89, + 320, + 101 + ] + ], + [ + "measure", + [ + 328, + 92, + 402, + 100 + ] + ], + [ + "was", + [ + 408, + 92, + 442, + 100 + ] + ], + [ + "compared", + [ + 449, + 89, + 533, + 103 + ] + ], + [ + "visually", + [ + 541, + 89, + 604, + 103 + ] + ], + [ + "to", + [ + 610, + 89, + 626, + 100 + ] + ], + [ + "the", + [ + 631, + 89, + 658, + 100 + ] + ], + [ + "HOPE", + [ + 665, + 89, + 719, + 100 + ] + ], + [ + "study", + [ + 726, + 89, + 773, + 103 + ] + ], + [ + "data", + [ + 778, + 89, + 815, + 100 + ] + ], + [ + "on", + [ + 821, + 92, + 842, + 100 + ] + ], + [ + "Premarin", + [ + 121, + 106, + 198, + 117 + ] + ], + [ + "0.625mg/MPA", + [ + 205, + 106, + 328, + 120 + ] + ], + [ + "2.5mg.", + [ + 334, + 106, + 392, + 120 + ] + ], + [ + "Figure", + [ + 218, + 179, + 302, + 200 + ] + ], + [ + "1.", + [ + 313, + 179, + 332, + 195 + ] + ], + [ + "Cumulative", + [ + 343, + 179, + 491, + 195 + ] + ], + [ + "Cycles", + [ + 501, + 179, + 589, + 199 + ] + ], + [ + "of", + [ + 599, + 179, + 626, + 195 + ] + ], + [ + "Amenorrhea", + [ + 632, + 179, + 797, + 195 + ] + ], + [ + "EE", + [ + 413, + 205, + 448, + 221 + ] + ], + [ + "Population", + [ + 458, + 205, + 600, + 225 + ] + ], + [ + "100", + [ + 148, + 256, + 184, + 269 + ] + ], + [ + "~e-", + [ + 682, + 285, + 717, + 292 + ] + ], + [ + "Placebo", + [ + 722, + 282, + 807, + 295 + ] + ], + [ + "300", + [ + 814, + 282, + 850, + 295 + ] + ], + [ + "\u2014:-", + [ + 682, + 309, + 717, + 316 + ] + ], + [ + "TMG", + [ + 721, + 306, + 771, + 319 + ] + ], + [ + "0.0625", + [ + 779, + 306, + 846, + 319 + ] + ], + [ + "(CC)", + [ + 854, + 305, + 899, + 322 + ] + ], + [ + "~~", + [ + 682, + 328, + 718, + 349 + ] + ], + [ + "TMG", + [ + 727, + 329, + 763, + 343 + ] + ], + [ + "0.125", + [ + 779, + 330, + 834, + 343 + ] + ], + [ + "(CC)", + [ + 841, + 329, + 886, + 347 + ] + ], + [ + "\u2014-", + [ + 682, + 357, + 716, + 364 + ] + ], + [ + "0.625/MPA", + [ + 722, + 354, + 834, + 367 + ] + ], + [ + "2.5", + [ + 841, + 354, + 871, + 367 + ] + ], + [ + "CC", + [ + 878, + 354, + 908, + 367 + ] + ], + [ + "\u2014*-", + [ + 679, + 376, + 716, + 396 + ] + ], + [ + "0.45/MPA", + [ + 722, + 378, + 822, + 390 + ] + ], + [ + "1.5", + [ + 830, + 378, + 859, + 390 + ] + ], + [ + "CC", + [ + 866, + 378, + 896, + 390 + ] + ], + [ + "%", + [ + 114, + 376, + 133, + 389 + ] + ], + [ + "of", + [ + 114, + 354, + 131, + 370 + ] + ], + [ + "subjects", + [ + 114, + 281, + 136, + 350 + ] + ], + [ + "\u2018", + [ + 215, + 428, + 223, + 440 + ] + ], + [ + "2", + [ + 249, + 428, + 260, + 440 + ] + ], + [ + "(3)", + [ + 269, + 416, + 304, + 467 + ] + ], + [ + "4", + [ + 320, + 427, + 332, + 440 + ] + ], + [ + "5)", + [ + 356, + 428, + 368, + 440 + ] + ], + [ + "6", + [ + 392, + 428, + 403, + 440 + ] + ], + [ + "7", + [ + 427, + 428, + 438, + 440 + ] + ], + [ + "8", + [ + 462, + 428, + 473, + 440 + ] + ], + [ + "(9)", + [ + 491, + 410, + 523, + 451 + ] + ], + [ + "10", + [ + 528, + 428, + 550, + 440 + ] + ], + [ + "11", + [ + 563, + 427, + 583, + 440 + ] + ], + [ + "12.13", + [ + 599, + 410, + 657, + 451 + ] + ], + [ + "Cycle", + [ + 403, + 451, + 461, + 477 + ] + ], + [ + "a", + [ + 498, + 463, + 545, + 497 + ] + ], + [ + ".", + [ + 556, + 477, + 559, + 481 + ] + ], + [ + "(ee", + [ + 572, + 450, + 650, + 502 + ] + ], + [ + "po", + [ + 645, + 445, + 723, + 487 + ] + ], + [ + "There", + [ + 120, + 513, + 171, 
+ 524 + ] + ], + [ + "were", + [ + 177, + 515, + 218, + 524 + ] + ], + [ + "no", + [ + 226, + 515, + 246, + 524 + ] + ], + [ + "differences", + [ + 252, + 513, + 347, + 524 + ] + ], + [ + "between", + [ + 354, + 513, + 426, + 524 + ] + ], + [ + "CE/TMG", + [ + 433, + 512, + 507, + 524 + ] + ], + [ + "treatment", + [ + 513, + 513, + 596, + 524 + ] + ], + [ + "groups", + [ + 601, + 515, + 661, + 527 + ] + ], + [ + "for", + [ + 666, + 512, + 690, + 524 + ] + ], + [ + "cumulative", + [ + 696, + 512, + 788, + 524 + ] + ], + [ + "amenorrhea", + [ + 120, + 530, + 225, + 541 + ] + ], + [ + "at", + [ + 231, + 530, + 247, + 541 + ] + ], + [ + "any", + [ + 253, + 533, + 284, + 543 + ] + ], + [ + "period", + [ + 290, + 529, + 344, + 543 + ] + ], + [ + "examined", + [ + 350, + 529, + 433, + 541 + ] + ], + [ + "or", + [ + 440, + 533, + 457, + 541 + ] + ], + [ + "for", + [ + 462, + 529, + 485, + 541 + ] + ], + [ + "cumulative", + [ + 491, + 529, + 583, + 541 + ] + ], + [ + "no", + [ + 590, + 532, + 610, + 540 + ] + ], + [ + "bleeding", + [ + 617, + 529, + 690, + 543 + ] + ], + [ + "except", + [ + 696, + 530, + 754, + 543 + ] + ], + [ + "for", + [ + 759, + 529, + 783, + 541 + ] + ], + [ + "cycles", + [ + 788, + 529, + 841, + 543 + ] + ], + [ + "7", + [ + 848, + 530, + 858, + 541 + ] + ], + [ + "to", + [ + 864, + 530, + 879, + 541 + ] + ], + [ + "13", + [ + 122, + 547, + 141, + 557 + ] + ], + [ + "for", + [ + 147, + 547, + 170, + 557 + ] + ], + [ + "the", + [ + 175, + 547, + 202, + 557 + ] + ], + [ + "EE", + [ + 209, + 547, + 233, + 557 + ] + ], + [ + "population.", + [ + 240, + 547, + 334, + 561 + ] + ], + [ + "The", + [ + 346, + 547, + 379, + 557 + ] + ], + [ + "last", + [ + 386, + 547, + 416, + 557 + ] + ], + [ + "observation", + [ + 422, + 546, + 521, + 557 + ] + ], + [ + "carried", + [ + 528, + 546, + 586, + 557 + ] + ], + [ + "forward", + [ + 592, + 546, + 656, + 557 + ] + ], + [ + "(LOCF)", + [ + 664, + 546, + 728, + 561 + ] + ], + [ + "analysis", + [ + 735, + 546, + 804, + 560 + ] + ], + [ + "(Figure", + [ + 811, + 547, + 871, + 561 + ] + ], + [ + "2)", + [ + 121, + 563, + 137, + 577 + ] + ], + [ + "indicated", + [ + 144, + 563, + 220, + 575 + ] + ], + [ + "that", + [ + 227, + 563, + 259, + 575 + ] + ], + [ + "the", + [ + 264, + 563, + 291, + 574 + ] + ], + [ + "CE/TMG", + [ + 298, + 563, + 373, + 574 + ] + ], + [ + "treatment", + [ + 379, + 564, + 461, + 575 + ] + ], + [ + "groups", + [ + 467, + 566, + 525, + 577 + ] + ], + [ + "had", + [ + 532, + 563, + 563, + 574 + ] + ], + [ + "similar", + [ + 570, + 563, + 627, + 574 + ] + ], + [ + "rates", + [ + 633, + 564, + 675, + 574 + ] + ], + [ + "of", + [ + 681, + 563, + 698, + 574 + ] + ], + [ + "cumulative", + [ + 703, + 563, + 796, + 575 + ] + ], + [ + "amenorrhea,", + [ + 121, + 580, + 230, + 593 + ] + ], + [ + "which", + [ + 236, + 580, + 286, + 591 + ] + ], + [ + "approached", + [ + 293, + 580, + 394, + 594 + ] + ], + [ + "that", + [ + 401, + 580, + 434, + 591 + ] + ], + [ + "of", + [ + 439, + 580, + 456, + 591 + ] + ], + [ + "the", + [ + 461, + 580, + 487, + 591 + ] + ], + [ + "placebo", + [ + 494, + 580, + 560, + 594 + ] + ], + [ + "group", + [ + 567, + 582, + 616, + 594 + ] + ], + [ + "by", + [ + 623, + 580, + 643, + 594 + ] + ], + [ + "cycle", + [ + 648, + 580, + 692, + 594 + ] + ], + [ + "13.", + [ + 701, + 580, + 725, + 591 + ] + ], + [ + "Confidential", + [ + 121, + 905, + 233, + 922 + ] + ], + [ + "12", + [ + 474, + 907, + 494, + 918 + ] + ], + [ + "of", + [ + 500, + 907, + 516, + 918 + ] + ], + [ + "42", + [ + 527, + 908, + 546, + 918 + ] 
+ ], + [ + "Confidential", + [ + 12, + 964, + 95, + 973 + ] + ], + [ + "Pursuant", + [ + 101, + 964, + 165, + 973 + ] + ], + [ + "to", + [ + 169, + 965, + 182, + 973 + ] + ], + [ + "Confidentiality", + [ + 187, + 964, + 286, + 976 + ] + ], + [ + "KERZC001-000285", + [ + 843, + 967, + 982, + 976 + ] + ], + [ + "Order", + [ + 12, + 978, + 53, + 987 + ] + ] +] \ No newline at end of file diff --git a/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png b/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png new file mode 100644 index 0000000000000000000000000000000000000000..60749a371d619fc343fd460fd5a7f9e59bde8d92 GIT binary patch literal 96744 zcmeFZWmH^E+a}s1Bv=w0lHl&{P9V5T)3^o*u8lR8U;%==hv1D%<1`5g?(Xgmjr;WT zzTcelXV#i^*33F<$%qX= z6^!Hh3P?y59Q!*LN*#>DEvO9)#*um%293ax>RC|-;v%0Ug)&Q_Qw2!_WzeZUITu6G zse)g<`OifEe>Fq^RwYR3gp=No4F%axIUB@NckSw*kUbKB{+dc8E;;Kg%`|i#wCgaHNdG`X~QqXZ?TJSkbe6 z3c@;R`5%Xxzra78yuE5m*dC>}%q5N+9J^}T`LW08LlLnt==ry2!YIeSyKQLaUs!)! z8t1S8yu!+9=Efb;huUZ)ZY4!;)u(#xf;pu9Y31g%X?wJ|HVpuoZvqg7y<5gPDj(n{ zP1i3L0>~)5J1TtMDV7;bQ>y^Os=y^|+ zcBxO_diR~=%K4{;so3Uzc&f;?z~2wda1~(IIPuo9lF#Pz9sQJr;k?X&t=a{4_V4YH zux6)jty6ancI@G7rS;{N^e=ml{AAegg=Q z8uV<0vmaV&f8(I$N?uPvzN{g9tPXr+`x={E+Aayt(8@VG?UQ@*zqXEa#kexP`sH_% zbLi5mb{I^MS?;z5C8b)lz#V+H&T1I*l2>9Kfr0J%V$6TnH#PPwApUAQm z&|wt&#!s%1`~G>9)ax9F=7PrMNs|H!f{O_E>_5a`>*IYc-gc*!AJSGXs%CfcBd&gm zSlx)OpbT`rXuzjzkGzp6;+$fQp=6`MB1cYpKhYpp6a1~F8i*}XkWxiGQ%~2 z;fYu8tM+9EGSZPRp|9V9aqhrdoi0SUKin0LP92B={V+bb9xLD*L3KC%kEAI}ZwgEA zr%$Tb&1}KX%LdGnN&lo>xepJww(8U)TAFn!2S_9$)q3IE2X<_^s;cvIjuj>wQUku| zlrsm?_W1s7Xk(;FFkP}@h1#D{0X7vyh8Q^fLS3!2pIK6<@Anjl25aVC*Z0V?a<`Np z^jqk$%*y_yu7~l0xW)#Hq1QEJ)FCG@SLU+JTRKMDt$2r+qL+^S8D+8~ZL5te1B(cFke2}Z=d%{uX6j#TZs3vcYdBkiyuV!D3?gu*lmQ_gzI_saO=0^$ zZ854?Fcgi##>(T3^_e>S{coQg`{K=c9o@WHJkaLX*sUprCXbBaS9@1ocE(Q1F(?~R z*7Qu-Y!Y)_kteI<>U=2hjCSDpY2P$aAtUZ#lMlrf5OShOwEaT`Pl{2{;m zB<)khkbk2qOjm@_7IXCVt+?uZ4A(8mE5+B|uG#XDPbpfz66xFlWp3nJq#m2Z9FJ2f za+zAnfnNl-pV&H5NjbOO2So}udVjD*?Xf3@#CCnaB4`dCMy-7}R0bfx{8d30)A*w! 
zj?W{Bsjm0p3<4&CjUPG*EjHs6Wce@C#VQ@=qO=pllIu0l&zzHlnXvH3Jjnr|%Vj-y z5}Qkazk8e5OaKs+{8Y;(5hYj2tK~AYSRVxLX@^bAQOw|AAujUYNj{D$yHwt7dBE)+ zQN5tQFCLH#u3P@&RR>O9eu8+&Jp0z(m%8%@W3_YG${K3!=ytIU$4hx7Zv<%uB=xia z1jVcuF7g56rZ0MjO7|by4|y%GNW_R8q7M?2V?_kaKGF?IX@M$^DO?vRAaW>C*LS9g z685mdsJMn8u*~}>o2A|OGOe+|-YLKO9^XE|3b__?E$g!W$Kd{uExN-=y}8EQ!^hn8 zz=?v)#Z;BaZP-6E^>Ea@=FS@ii)-1lP8-IH2$xairZDFO@u^JP* zGAfN>g+N1qV7U10i?V|DKPTKji`?@_M=v$o+Y4mmvmZY$IUo%yi(U*hfpI?1Ruyzr zFeW+=y%=sUAa8|B^wkhpI~niC?n&*}ZqQDpc*RBcy%}%RL_?(fTg@3HT$tTN4JH(i zA<#mT)Pc-*Ex={hZ*3GsyXEdn4X|ZHvX3TJ(iV_Iju#mvo01BGp+0uxm&g7}Gb1sd^7wli|5dFgs(b#gHmM`ArI-;fVqPP#Wru};qamt)T=ty7GJ>P5ej;Q5 zfcfvDQVdeG9U^OrGZiNO`iR)4hMcYpxHn5IjQTN~c!I{JTLn(mGE|5vB3^NgWOeb+ z!V@F=W?MzU`Q-FwqAv4ET*Qbvw0Wdy2ucC~EdCBJ zWf&J!`7&pkMf*9S8G8YQUN)@+wN@}NU7Y<50LX?rS&&dc6Eo^RJP=dK1DCGuj{GkcxD5+*`(XppQwrJHaS^hMyit&so$0s)pN8DTa@-^m z^a*3Ekk&~;&*+ZAWv+SxJ|pK3VU=38-5GeL9il^M;5O<4V|?+wf~tgTR+%S(79Hwz zsTZX+_(cMSY7S52Dk%~AQI1!P9Y=%6Ym*7K`HssT3j&@s=;se|mf_Hf)=yJ=62}t? z$8iu%=!prwGY2nM`FPV6@}>h#R>XQ0T>1Dm6$TYz1Z}@g9fAoQJ%QZhY@aK~HGz>iLJRo);cWbuM=A&*J=W?zDC z=K3OH`}{{&!1!^3BmuXPbMZ}qKxfW3j3lsz>>-mmZU$Kixe5^V+Z|Um*x(KOlfdj% zEY(_OcKrli{BG=J8<^nmE~e;BCG1Pg4W&oX;>+FCV@0ww7FS= z_HQ?wJ;4-t-UlHZUbG+L$476@35p^6*Mvp&p34OVY(}5VDw1#rA!ruuvPSH*W84Tk z)^3I(gebDD^Q8_9I^^h48oclmi`oUBIjzyf_WP z%!KUN@g@Ug&l|ugEr4Ddh z;fiCFfZwR>F#hah$he)03;5&aKWjQXPRas9QimWztFPQ3njvS%l#@XG9|9@`vZjsy zQ9wAOJ6XV^kk4*Lkc510V*DiBf#}0zT`M39`{KeRwkwG4R>`$5hp4De6#fXr@A?+uN8_E7R?*neXna7JcW66)SWF z-aI|$II@n5mMy1sR{V*=P4ZDj$PDX+66&9|!yD_H;}CmUtcXIX!iDv?FM;poPtLVO z$frUHDMpIhK>o=Yqub;jY3StBtxIzqR?WuXN@v*YIcW~?XT`ZVVvf^Y@yIP@iSDM&hqJ7ia} z@0_Cv$mKNo26xlDI0?Yrj}m^W9RD0u`xz&_FoKn_*RTIdXzQ2Os$MR4u6H~i#Isfr z;`CiyrVMQP`VHO<);sstbEv@Hr%7w*kokS)pj!SLc>kAF4eMqn;MT|Q{1G0)9fC~a zMS(0=awlk%_h^=8)(6xpX&F(hf(+YT?z&GSZ<)YfS^RrnxbY)wx_#uG;ST; zGKZ>9QwL*PUVSXlyVlMSUbwO$v2?Nxx|rhn=r8D6sBDhaouT@ia&s$W`LpExwpPH8 zosM&*rsyMX1b~s^{I>Q$$4C{;O;g1H1AznZdZt&4`LnIykJ>LS{S?mr56s^jjlcBs zr}|nZtNU3#{7? zdXw7Mz^VpQ(}6qGFM^nEi7V!BB?n5heitni63kZm2U9~2FtBfzmbz2JW_jCjJ}qNw zs95Aj-KhG}$9WbxCY0#WQTUy)qdgSXSm|_Skft->=wJmXC8ToKKl^~%J4alh^jK}zzukEXt7H{Hadbc*&?`Ylr2V|Q49Qz?+l%3vQc{WJj-e|# zL%=SQ_X!nE>X7>uSBM!o^-Q6su}x31XYc5;KiS2wOj;o6Z|2}MnnL%`g=3uKdL&*pg3S|4KEJGt+=#6ad#~g zEAERGXK{CTDaB=RYjL-w_~Py?Rw!=8rMQ0d%G|lnpZt0=IVZ`9O%msa&B<$o!_)r7 zHT^U_F9NFAPW7|>dFJ4_pBGivM}QZbvjt$L`fVf&<87N02Bht6RoD^NIsdN171xY3kc9MfAN`67J2*3dVo( zLPu1y@V^ESlsMp=}V*Qm{`bI)e7kyEbvLn zC#PPU>_@a5PC4hBF7vx<*?Y=H-;}H!02*c}ZN|gcmQPJ?nR_1) zdrUpMe?iPzT6jOFDBcPQ*-}l0T%2Xj!nn z^9c0;Fp5`%KQGVBl7RYcoRf=Xa_(*rGc*n6XfGj)c<;vDRPV*$!hk*bu{dOwx}$OP zI-x#6J<#m&zGS%(ioSS_a;7|o~orj;qj{Ta~i8KQy4lP0aBj9CX)?R_1F9?dc zz@nY>gpXV`N*GX?TDo_~nZ#YcO-gIs0>Ime3i#m(i174icL0gIF9yVd%uZ`Q3aKOP zj33BH5*KPYj6P}di7%6P8wOF^yDr;aT@=lA)Qw)YuLW}5`3Q4(nW&Kp!~KrhdX7R8 zdRWV1(_A4Fvk5!_V4<)VN4D)-f)Kpn)qAN9VMi1F0QK@8glc>YXI`>4y26X0FSy^q;z?fy&ol*WQG%$gpz>dEq-4`?qB$3Nalk5HuzCeF}a z>irh0>V@&qdwQuyLl)I>$@TQLkq&i3Y95@AY8kGn?FKZDSfwY+y!Df4<@U;G_w832 zY4U*0PrmPp|yDxKVA>ULI!t)H}R@+~5yexBwfFoGF{mSis9-H6hFN{Fy!7>hS)8

ki}I>Ty9E-fH(IyYvE}rjh_-RyL^6T%Z?Q1-ZHxb=2isXa4~&HZAk(r{j!pvn|qCU1M60V zLC2N%)2P{rIW{$seFE!7{oDZ6Yc8*Tzk~D9zflMrAJX0{nI6knKJVmw6}xK)M}5eF zFajZj772??UG1OAoJ*&+D}&Mv9nMMHRUiARw+*1bEF_cZ`#oKh$CiDcs-X1Q9sz{( z`+wC|f6srf4p#6=Nx!;KW`{<6%E$xDa*HKmd3*G^aG82`A&OKy9_iWg?-qn(VdJD+ z1<5S3v1~S6hz~E_B^aZ?_er&!#l>X{KOLyg`Mtqjul$7pC&g!FsE17{WzFM}Whl<3 zbjwhio)0W5&CLo=RR|v=@?X8RZo>yWaBr0j!v)P^XjQF8dK3vg_+6}7|Iu)9Uo%yP z5Q7u)C|mOHsqJ0b_EV%Uio9f|Tb@W9ZGd9QrMaY59^IxRzn z#k3B@T0&>h5o^86X}XcA0mp@x&T+u08_h!7zYt7?p3#npdy~w9wR|Y+;q&~HcOMpRC zmBAgULzen?rXM|lJH2;5^0$<}g_7n8iT|`#sf6?$+4@RD?H@!?@*njCCVQP_k!am9Df7XJJz;r`0V=SSjstL4mh3c<590iVk4q&ZDe~&!$-`TYuZ7V& z<8PEn33%ibvJawfCn<{nIDEl>Cr1taB&_p}dpTpL=?4`n?C~CJRMia4J)qmXhQnaa z(9G6WGCPk{A<9V8Ln1^GF>KrkF$0V41JiedVOqN8qK8gnIJ&0`mU91;QHtq_1+lf% zw{ID25A7$}sHqu`yQ*%6&Jv`H}e@QjL&$qLh`{nWM5!pU-ojy-3}e(5p^ zKg6mH>OBW%c^QTEk)LG#{-GfsyL9<0Nrvu-!Tsl>v-@m9QGJO2*B*He(*(P{F>jVq z$i0>lb(t-Y3s}C>paY{F(J-wxN4~nn-LBIBMY10_MvBLXy=yb}IpG!zns3OrPcdfS zU~u@|r=5u^2a(W?tC*qecsze=P;)ekA=YJ6t$EB@T;W4n+q@zxX1L)&^tsiea#4); zs`xj49i5w6%NgsW=iHbiVbPXRNeMioIt=z;G=4x}TheGKo9PO(NKlARbHDkQ-t7*SV6xDAYh;1X~?3uNTA;tHN zs*t3V_;Mg1jhEtxE~qN8b;PPx4ohpW+UP9WB4P)otxhfoP3|?q*t%oxEFPpKf@z7; zX}+9cSIRKYZJ+}fLrXY+;_nDDlk9X&=A{E+t>`UN^|`s1pPZP+Hc?&*17ke&^mmB` zRIIqwDY{lstA`V;&=|h&wJEKU_~!}gZbQHIMsB@(x#TmfTaV!OU67hxn?U@jT=YlN zV|wS_a)^nEy>+#TX2;1wyJnH8Fsxm=#3SQ>ts}0$$of7p66`jy)i8%YoLxQ(q&V*< zx2B>ja6N9JQ=&5GlZywa?p{Pv2k8S|eaiu-XQv9Jh$@;Ll!ipvpYpO;`YuK>E^-F- zZwn7#R4hu)_)Kb?1`%uiAU}tCmi|jGxcFgo#n<6B#1fI$oEOX?PHx4|Mr5$9ag^T3p#4An2NZJwf%jY)#DGyx9YY z!AGAmK-OJ3_foGqSN!q`kts%=;r8-TbH9%r>?c3;ckwZaa6Ef3M84c;9PyHdc<1Ds zUB%B-8qIC6VhJT{AwgI`pY<-@c1HMNuw42S@)t@z8g(50_4l+gy%$sM5=17DLc3eg zCh5<-lcTaTi89Arp1bPTzL@L0_3~_McvC~`IhEz67Rc<9kU|Gj;N`1T%E(*4xE=d& z#QammW})<E>7J528;%`lS) zCn`-K`AG-b+`&R$OOe|OqrU52G4Y^Hr6<~Xm@4m0XeLbwJv@kX9r9o5`6R zcDp*FT?^O}6VRU!9gh4es3GKz8jK0ZS=W-HQkVw8ZRSO#E+`1T{>SKhK4Gr%uj0?i z5o@fg&F@Grb&ZYirypFB_+wz0v%Qr^(hj8bfUL}~0hnDss1$_Vme0nSK!#6$u<^RK zZ@pqD7&J=kEAQ{}B3*nT(nqui`dkUK2X7b_aJxNRDs)c9k|lZU9a5I5b!5D0Q`KR5Sb2$OM#O0f#MxIQC=7q^$@-U z=Xn7p3?EA?-{%JTvjZ?Ks}(#?O;t+71xjqy8$CepihSXmq+u?fta%P7>Y8_xJhGtN zC>He1u7jBoL|YOq-;}?Y@2lv*jy~>4BO<>YXsR)4T1>Joy{$v~(>nQhY8}Ac1PMm_ zqn-uD|7#mnpj)$O!o&&CC zD$U2sF8~>mlGYdF8XBk93pa8iaYUt?1_s3>Xa z6ucMlB?#K%qoj8nD(KXpEbw{M6y$!7=HJ9UsJk=8jXNq;7b=^eN#m9%u6k6r`#6Y} zhaI#^1W1)?PGcUWZ+ufPOE z(&B;|pVDWdMI0W4;yzz(bcc(mP5WaSA<^aouM}hrJVQ(|5El{++9l_%N9NMoqhC<+ zDubxm(VN%WNtVLuSMhfMlYGDbM)!3uowq}y4@-)HEkg(B?6&ml&BgBjH7mf~_|p41 zhR1!dwG&ugdffQk{qwRL=0Humyy7J2dW70Lh{U}F(1Z5J?>Cg6#CJ?%Ds&h)3( z>n$(Z{tOgozzRhItG+G>00>~C2wZ0ja(!UGmEYlnlpmRuSUDmLjotU8a>T}oWG?y3 zSOA>U3&rqKj?vQmH+v>-{^e_SRYu_rAK933n_3x7tlCT31M(OdsY=Q-z+UU56Xfw@cSR+8iCZPP2Ld zYu8brcEqhd`3u(cRagNhEVra*xvXB^@*2KLr7}rnC@Wr1=%)C6|6FO&QU(|TeFvPk z^#0)`z?M2UZ$+4assJ8nvMLNDs=5PL*C#VO>)THdOv%uiRE*E)Rlj!rHHqhA$ha=g zl%j@e03Ad9fXg=5KW9L*zo0MuTQsVQEfX-4)u#~^Hd-wiIxf)LSJ97|d)=LW+d~_zQha;& z>}V7-eDjvOvZs$WYtg_CVGT;0Oh2?qzy{NJGB9gp!cS(w%X2u-ep*g5yV%z0L>%n& z=lrl{HdgxvKN-VSKd}yVJn}{lD;dMkx<6~vNlrgs@!5jY030QUd`d9o1`Xqv$arjQ zZp6HVHZPc_AF&o!a4~*H3iSMI?9Qux#f?y0;*)8Gp`Yz<;L935O!*pIGSgoX*-1Ac z{O}JI!B+|AE8Zjj@_l7kBW}N3@gKP^uhyBqa*^_{u&*SSki)bx{yT@qj}3}Wn4FdZ zx39O+=-i}r+{{hfKtg6Npw|Sz#lgkF%E86T!L7-~Da6Am#KFbP!7IeUk@Sfp;QtBi q9nGyRy#G(YrOCx5#KkMb&CATe_bT`t5Jmqg0LV!zOI3d|4*3rrI-SJ; literal 0 HcmV?d00001 diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py index e30b9fe346bb2a..1ff75a24cb8314 100644 --- 
a/tests/pipelines/test_pipelines_document_question_answering.py +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -17,6 +17,7 @@ from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline +from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, @@ -33,6 +34,8 @@ if is_vision_available(): from PIL import Image + + from transformers.image_utils import load_image else: class Image: @@ -40,6 +43,9 @@ class Image: def open(*args, **kwargs): pass + def load_image(_): + return None + @is_pipeline_test @require_torch @@ -47,23 +53,34 @@ def open(*args, **kwargs): class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + @require_pytesseract + @require_vision def get_test_pipeline(self, model, tokenizer, feature_extractor): dqa_pipeline = pipeline( "document-question-answering", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor ) + + img_path = "./tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png" + words_path = "./tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json" + question = "What is the placebo?" examples = [ { - "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), - "question": "How many cats are there?", + "image": Image.open(img_path), + "question": question, }, { - "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", - "question": "How many cats are there?", + "image": img_path, + "question": question, }, { - "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", - "question": "How many cats are there?", - "word_boxes": json.load(open("./tests/fixtures/tests_samples/COCO/000000039769.json", "r")), + "image": img_path, + "question": question, + "word_boxes": json.load(open(words_path, "r")), + }, + { + "image": None, + "question": question, + "word_boxes": json.load(open(words_path, "r")), }, ] return dqa_pipeline, examples @@ -73,17 +90,18 @@ def run_pipeline_test(self, dqa_pipeline, examples): self.assertEqual( outputs, [ - [{"score": ANY(float), "answer": ANY(str)}], - [{"score": ANY(float), "answer": ANY(str)}], - ], + [ + {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, + {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, + ] + ] + * 4, ) - # TODO: Add layoutlmv1 once PR #18407 lands - @require_torch @require_detectron2 @require_pytesseract - def test_small_model_pt_layoutlmv2(self): + def test_small_model_pt(self): dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2") image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" question = "How many cats are there?" @@ -116,22 +134,24 @@ def test_small_model_pt_layoutlmv2(self): outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) - def test_small_model_pt_donut(self): - dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") - # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") - image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" - question = "How many cats are there?" 
- - outputs = dqa_pipeline(image=image, question=question, top_k=2) - self.assertEqual( - nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] - ) + # TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented + # @require_torch + # def test_small_model_pt_donut(self): + # dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") + # # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") + # image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + # question = "How many cats are there?" + # + # outputs = dqa_pipeline(image=image, question=question, top_k=2) + # self.assertEqual( + # nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] + # ) @slow @require_torch @require_detectron2 @require_pytesseract - def test_large_model_pt_layoutlmv2(self): + def test_large_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", @@ -172,6 +192,67 @@ def test_large_model_pt_layoutlmv2(self): * 2, ) + @slow + @require_torch + @require_pytesseract + @require_vision + def test_large_model_pt_layoutlm(self): + tokenizer = AutoTokenizer.from_pretrained( + "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True + ) + dqa_pipeline = pipeline( + "document-question-answering", + model="impira/layoutlm-document-qa", + tokenizer=tokenizer, + revision="3dc6de3", + ) + image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + question = "What is the invoice number?" + + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ], + ) + + outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ], + ) + + outputs = dqa_pipeline( + [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ] + ] + * 2, + ) + + word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) + + # This model should also work if `image` is set to None + outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ], + ) + @slow @require_torch def test_large_model_pt_donut(self): @@ -185,7 +266,7 @@ def test_large_model_pt_donut(self): image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) - self.assertEqual(nested_simplify(outputs, decimals=4), {"score": 0.5, "answer": "us-001"}) + self.assertEqual(nested_simplify(outputs, decimals=4), {"answer": "us-001"}) @require_tf @unittest.skip("Document question answering not implemented in TF") From 08193d3f7e01bb3b90db29b8bad344b0bb6ef12e Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 5 Sep 2022 16:20:42 -0700 Subject: [PATCH 32/34] Improve docs --- src/transformers/models/auto/modeling_auto.py | 2 +- src/transformers/models/auto/modeling_tf_auto.py | 2 +- src/transformers/pipelines/document_question_answering.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 545bc348bdb1ca..1cb0ae44db0105 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -909,7 +909,7 @@ class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): AutoModelForDocumentQuestionAnswering = auto_class_update( AutoModelForDocumentQuestionAnswering, head_doc="document question answering", - checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', # XXX verify docs + checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', ) diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index 46c85022ecad35..ba1e74e14caf63 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -532,7 +532,7 @@ class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): TFAutoModelForDocumentQuestionAnswering = auto_class_update( TFAutoModelForDocumentQuestionAnswering, head_doc="document question answering", - checkpoint_for_example="impira/layoutlm-document-qa", + checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', ) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index ce56fb38c890fb..161e2f004d8627 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -88,8 +88,9 @@ class ModelType(ExplicitEnum): class DocumentQuestionAnsweringPipeline(Pipeline): # TODO: Update task_summary docs to include an example with document QA and then update the first sentence """ - Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. See the [question answering - examples](../task_summary#question-answering) for more information. + Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are + similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional + OCR'd words/boxes) as input instead of text context. This document question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"document-question-answering"`. 
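For reference, here is a minimal usage sketch of the pipeline whose docstring is updated above. It is not part of the patch itself; the checkpoint, revision, tokenizer settings, image URL, and question simply mirror the call pattern exercised in test_large_model_pt_layoutlm, and it assumes Pillow and pytesseract are installed so the pipeline can OCR the image into words and bounding boxes.

    from transformers import AutoTokenizer, pipeline

    # Same checkpoint, revision, and tokenizer settings as in test_large_model_pt_layoutlm.
    tokenizer = AutoTokenizer.from_pretrained(
        "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
    )
    dqa_pipeline = pipeline(
        "document-question-answering",
        model="impira/layoutlm-document-qa",
        tokenizer=tokenizer,
        revision="3dc6de3",
    )

    # With no word_boxes supplied, the pipeline OCRs the image via pytesseract.
    outputs = dqa_pipeline(
        image="https://templates.invoicehome.com/invoice-template-us-neat-750px.png",
        question="What is the invoice number?",
        top_k=2,
    )
    # Per the test above, the top prediction should look like:
    # {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}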
From d271829cfc9bb4706773483a54531e414797f97a Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 5 Sep 2022 16:23:03 -0700 Subject: [PATCH 33/34] Remove extra assets and add copyright --- .../pipelines/document_question_answering.py | 18 +- .../tests_samples/DocVQA/yrvw0217_50.json | 1226 ----------------- .../tests_samples/DocVQA/yrvw0217_50.png | Bin 96744 -> 0 bytes ...t_pipelines_document_question_answering.py | 30 +- 4 files changed, 34 insertions(+), 1240 deletions(-) delete mode 100644 tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json delete mode 100644 tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index 161e2f004d8627..3329ce2dc48103 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -1,3 +1,17 @@ +# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import re from typing import List, Optional, Tuple, Union @@ -89,8 +103,8 @@ class DocumentQuestionAnsweringPipeline(Pipeline): # TODO: Update task_summary docs to include an example with document QA and then update the first sentence """ Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are - similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional - OCR'd words/boxes) as input instead of text context. + similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd + words/boxes) as input instead of text context. This document question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"document-question-answering"`. 
diff --git a/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json b/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json deleted file mode 100644 index 03ca09d6f1ff78..00000000000000 --- a/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json +++ /dev/null @@ -1,1226 +0,0 @@ -[ - [ - "but", - [ - 121, - 89, - 147, - 101 - ] - ], - [ - "no", - [ - 153, - 92, - 174, - 101 - ] - ], - [ - "bleeding).", - [ - 180, - 89, - 264, - 103 - ] - ], - [ - "Each", - [ - 277, - 89, - 320, - 101 - ] - ], - [ - "measure", - [ - 328, - 92, - 402, - 100 - ] - ], - [ - "was", - [ - 408, - 92, - 442, - 100 - ] - ], - [ - "compared", - [ - 449, - 89, - 533, - 103 - ] - ], - [ - "visually", - [ - 541, - 89, - 604, - 103 - ] - ], - [ - "to", - [ - 610, - 89, - 626, - 100 - ] - ], - [ - "the", - [ - 631, - 89, - 658, - 100 - ] - ], - [ - "HOPE", - [ - 665, - 89, - 719, - 100 - ] - ], - [ - "study", - [ - 726, - 89, - 773, - 103 - ] - ], - [ - "data", - [ - 778, - 89, - 815, - 100 - ] - ], - [ - "on", - [ - 821, - 92, - 842, - 100 - ] - ], - [ - "Premarin", - [ - 121, - 106, - 198, - 117 - ] - ], - [ - "0.625mg/MPA", - [ - 205, - 106, - 328, - 120 - ] - ], - [ - "2.5mg.", - [ - 334, - 106, - 392, - 120 - ] - ], - [ - "Figure", - [ - 218, - 179, - 302, - 200 - ] - ], - [ - "1.", - [ - 313, - 179, - 332, - 195 - ] - ], - [ - "Cumulative", - [ - 343, - 179, - 491, - 195 - ] - ], - [ - "Cycles", - [ - 501, - 179, - 589, - 199 - ] - ], - [ - "of", - [ - 599, - 179, - 626, - 195 - ] - ], - [ - "Amenorrhea", - [ - 632, - 179, - 797, - 195 - ] - ], - [ - "EE", - [ - 413, - 205, - 448, - 221 - ] - ], - [ - "Population", - [ - 458, - 205, - 600, - 225 - ] - ], - [ - "100", - [ - 148, - 256, - 184, - 269 - ] - ], - [ - "~e-", - [ - 682, - 285, - 717, - 292 - ] - ], - [ - "Placebo", - [ - 722, - 282, - 807, - 295 - ] - ], - [ - "300", - [ - 814, - 282, - 850, - 295 - ] - ], - [ - "\u2014:-", - [ - 682, - 309, - 717, - 316 - ] - ], - [ - "TMG", - [ - 721, - 306, - 771, - 319 - ] - ], - [ - "0.0625", - [ - 779, - 306, - 846, - 319 - ] - ], - [ - "(CC)", - [ - 854, - 305, - 899, - 322 - ] - ], - [ - "~~", - [ - 682, - 328, - 718, - 349 - ] - ], - [ - "TMG", - [ - 727, - 329, - 763, - 343 - ] - ], - [ - "0.125", - [ - 779, - 330, - 834, - 343 - ] - ], - [ - "(CC)", - [ - 841, - 329, - 886, - 347 - ] - ], - [ - "\u2014-", - [ - 682, - 357, - 716, - 364 - ] - ], - [ - "0.625/MPA", - [ - 722, - 354, - 834, - 367 - ] - ], - [ - "2.5", - [ - 841, - 354, - 871, - 367 - ] - ], - [ - "CC", - [ - 878, - 354, - 908, - 367 - ] - ], - [ - "\u2014*-", - [ - 679, - 376, - 716, - 396 - ] - ], - [ - "0.45/MPA", - [ - 722, - 378, - 822, - 390 - ] - ], - [ - "1.5", - [ - 830, - 378, - 859, - 390 - ] - ], - [ - "CC", - [ - 866, - 378, - 896, - 390 - ] - ], - [ - "%", - [ - 114, - 376, - 133, - 389 - ] - ], - [ - "of", - [ - 114, - 354, - 131, - 370 - ] - ], - [ - "subjects", - [ - 114, - 281, - 136, - 350 - ] - ], - [ - "\u2018", - [ - 215, - 428, - 223, - 440 - ] - ], - [ - "2", - [ - 249, - 428, - 260, - 440 - ] - ], - [ - "(3)", - [ - 269, - 416, - 304, - 467 - ] - ], - [ - "4", - [ - 320, - 427, - 332, - 440 - ] - ], - [ - "5)", - [ - 356, - 428, - 368, - 440 - ] - ], - [ - "6", - [ - 392, - 428, - 403, - 440 - ] - ], - [ - "7", - [ - 427, - 428, - 438, - 440 - ] - ], - [ - "8", - [ - 462, - 428, - 473, - 440 - ] - ], - [ - "(9)", - [ - 491, - 410, - 523, - 451 - ] - ], - [ - "10", - [ - 528, - 428, - 550, - 440 - ] - ], - [ - "11", - [ - 563, - 427, - 583, - 440 - ] - ], - [ - "12.13", - [ - 599, - 410, - 657, - 451 - ] - ], - [ - "Cycle", - [ - 
403, - 451, - 461, - 477 - ] - ], - [ - "a", - [ - 498, - 463, - 545, - 497 - ] - ], - [ - ".", - [ - 556, - 477, - 559, - 481 - ] - ], - [ - "(ee", - [ - 572, - 450, - 650, - 502 - ] - ], - [ - "po", - [ - 645, - 445, - 723, - 487 - ] - ], - [ - "There", - [ - 120, - 513, - 171, - 524 - ] - ], - [ - "were", - [ - 177, - 515, - 218, - 524 - ] - ], - [ - "no", - [ - 226, - 515, - 246, - 524 - ] - ], - [ - "differences", - [ - 252, - 513, - 347, - 524 - ] - ], - [ - "between", - [ - 354, - 513, - 426, - 524 - ] - ], - [ - "CE/TMG", - [ - 433, - 512, - 507, - 524 - ] - ], - [ - "treatment", - [ - 513, - 513, - 596, - 524 - ] - ], - [ - "groups", - [ - 601, - 515, - 661, - 527 - ] - ], - [ - "for", - [ - 666, - 512, - 690, - 524 - ] - ], - [ - "cumulative", - [ - 696, - 512, - 788, - 524 - ] - ], - [ - "amenorrhea", - [ - 120, - 530, - 225, - 541 - ] - ], - [ - "at", - [ - 231, - 530, - 247, - 541 - ] - ], - [ - "any", - [ - 253, - 533, - 284, - 543 - ] - ], - [ - "period", - [ - 290, - 529, - 344, - 543 - ] - ], - [ - "examined", - [ - 350, - 529, - 433, - 541 - ] - ], - [ - "or", - [ - 440, - 533, - 457, - 541 - ] - ], - [ - "for", - [ - 462, - 529, - 485, - 541 - ] - ], - [ - "cumulative", - [ - 491, - 529, - 583, - 541 - ] - ], - [ - "no", - [ - 590, - 532, - 610, - 540 - ] - ], - [ - "bleeding", - [ - 617, - 529, - 690, - 543 - ] - ], - [ - "except", - [ - 696, - 530, - 754, - 543 - ] - ], - [ - "for", - [ - 759, - 529, - 783, - 541 - ] - ], - [ - "cycles", - [ - 788, - 529, - 841, - 543 - ] - ], - [ - "7", - [ - 848, - 530, - 858, - 541 - ] - ], - [ - "to", - [ - 864, - 530, - 879, - 541 - ] - ], - [ - "13", - [ - 122, - 547, - 141, - 557 - ] - ], - [ - "for", - [ - 147, - 547, - 170, - 557 - ] - ], - [ - "the", - [ - 175, - 547, - 202, - 557 - ] - ], - [ - "EE", - [ - 209, - 547, - 233, - 557 - ] - ], - [ - "population.", - [ - 240, - 547, - 334, - 561 - ] - ], - [ - "The", - [ - 346, - 547, - 379, - 557 - ] - ], - [ - "last", - [ - 386, - 547, - 416, - 557 - ] - ], - [ - "observation", - [ - 422, - 546, - 521, - 557 - ] - ], - [ - "carried", - [ - 528, - 546, - 586, - 557 - ] - ], - [ - "forward", - [ - 592, - 546, - 656, - 557 - ] - ], - [ - "(LOCF)", - [ - 664, - 546, - 728, - 561 - ] - ], - [ - "analysis", - [ - 735, - 546, - 804, - 560 - ] - ], - [ - "(Figure", - [ - 811, - 547, - 871, - 561 - ] - ], - [ - "2)", - [ - 121, - 563, - 137, - 577 - ] - ], - [ - "indicated", - [ - 144, - 563, - 220, - 575 - ] - ], - [ - "that", - [ - 227, - 563, - 259, - 575 - ] - ], - [ - "the", - [ - 264, - 563, - 291, - 574 - ] - ], - [ - "CE/TMG", - [ - 298, - 563, - 373, - 574 - ] - ], - [ - "treatment", - [ - 379, - 564, - 461, - 575 - ] - ], - [ - "groups", - [ - 467, - 566, - 525, - 577 - ] - ], - [ - "had", - [ - 532, - 563, - 563, - 574 - ] - ], - [ - "similar", - [ - 570, - 563, - 627, - 574 - ] - ], - [ - "rates", - [ - 633, - 564, - 675, - 574 - ] - ], - [ - "of", - [ - 681, - 563, - 698, - 574 - ] - ], - [ - "cumulative", - [ - 703, - 563, - 796, - 575 - ] - ], - [ - "amenorrhea,", - [ - 121, - 580, - 230, - 593 - ] - ], - [ - "which", - [ - 236, - 580, - 286, - 591 - ] - ], - [ - "approached", - [ - 293, - 580, - 394, - 594 - ] - ], - [ - "that", - [ - 401, - 580, - 434, - 591 - ] - ], - [ - "of", - [ - 439, - 580, - 456, - 591 - ] - ], - [ - "the", - [ - 461, - 580, - 487, - 591 - ] - ], - [ - "placebo", - [ - 494, - 580, - 560, - 594 - ] - ], - [ - "group", - [ - 567, - 582, - 616, - 594 - ] - ], - [ - "by", - [ - 623, - 580, - 643, - 594 - ] - ], - [ - "cycle", - [ - 648, - 580, - 692, 
- 594 - ] - ], - [ - "13.", - [ - 701, - 580, - 725, - 591 - ] - ], - [ - "Confidential", - [ - 121, - 905, - 233, - 922 - ] - ], - [ - "12", - [ - 474, - 907, - 494, - 918 - ] - ], - [ - "of", - [ - 500, - 907, - 516, - 918 - ] - ], - [ - "42", - [ - 527, - 908, - 546, - 918 - ] - ], - [ - "Confidential", - [ - 12, - 964, - 95, - 973 - ] - ], - [ - "Pursuant", - [ - 101, - 964, - 165, - 973 - ] - ], - [ - "to", - [ - 169, - 965, - 182, - 973 - ] - ], - [ - "Confidentiality", - [ - 187, - 964, - 286, - 976 - ] - ], - [ - "KERZC001-000285", - [ - 843, - 967, - 982, - 976 - ] - ], - [ - "Order", - [ - 12, - 978, - 53, - 987 - ] - ] -] \ No newline at end of file diff --git a/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png b/tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png deleted file mode 100644 index 60749a371d619fc343fd460fd5a7f9e59bde8d92..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96744 zcmeFZWmH^E+a}s1Bv=w0lHl&{P9V5T)3^o*u8lR8U;%==hv1D%<1`5g?(Xgmjr;WT zzTcelXV#i^*33F<$%qX= z6^!Hh3P?y59Q!*LN*#>DEvO9)#*um%293ax>RC|-;v%0Ug)&Q_Qw2!_WzeZUITu6G zse)g<`OifEe>Fq^RwYR3gp=No4F%axIUB@NckSw*kUbKB{+dc8E;;Kg%`|i#wCgaHNdG`X~QqXZ?TJSkbe6 z3c@;R`5%Xxzra78yuE5m*dC>}%q5N+9J^}T`LW08LlLnt==ry2!YIeSyKQLaUs!)! z8t1S8yu!+9=Efb;huUZ)ZY4!;)u(#xf;pu9Y31g%X?wJ|HVpuoZvqg7y<5gPDj(n{ zP1i3L0>~)5J1TtMDV7;bQ>y^Os=y^|+ zcBxO_diR~=%K4{;so3Uzc&f;?z~2wda1~(IIPuo9lF#Pz9sQJr;k?X&t=a{4_V4YH zux6)jty6ancI@G7rS;{N^e=ml{AAegg=Q z8uV<0vmaV&f8(I$N?uPvzN{g9tPXr+`x={E+Aayt(8@VG?UQ@*zqXEa#kexP`sH_% zbLi5mb{I^MS?;z5C8b)lz#V+H&T1I*l2>9Kfr0J%V$6TnH#PPwApUAQm z&|wt&#!s%1`~G>9)ax9F=7PrMNs|H!f{O_E>_5a`>*IYc-gc*!AJSGXs%CfcBd&gm zSlx)OpbT`rXuzjzkGzp6;+$fQp=6`MB1cYpKhYpp6a1~F8i*}XkWxiGQ%~2 z;fYu8tM+9EGSZPRp|9V9aqhrdoi0SUKin0LP92B={V+bb9xLD*L3KC%kEAI}ZwgEA zr%$Tb&1}KX%LdGnN&lo>xepJww(8U)TAFn!2S_9$)q3IE2X<_^s;cvIjuj>wQUku| zlrsm?_W1s7Xk(;FFkP}@h1#D{0X7vyh8Q^fLS3!2pIK6<@Anjl25aVC*Z0V?a<`Np z^jqk$%*y_yu7~l0xW)#Hq1QEJ)FCG@SLU+JTRKMDt$2r+qL+^S8D+8~ZL5te1B(cFke2}Z=d%{uX6j#TZs3vcYdBkiyuV!D3?gu*lmQ_gzI_saO=0^$ zZ854?Fcgi##>(T3^_e>S{coQg`{K=c9o@WHJkaLX*sUprCXbBaS9@1ocE(Q1F(?~R z*7Qu-Y!Y)_kteI<>U=2hjCSDpY2P$aAtUZ#lMlrf5OShOwEaT`Pl{2{;m zB<)khkbk2qOjm@_7IXCVt+?uZ4A(8mE5+B|uG#XDPbpfz66xFlWp3nJq#m2Z9FJ2f za+zAnfnNl-pV&H5NjbOO2So}udVjD*?Xf3@#CCnaB4`dCMy-7}R0bfx{8d30)A*w! 
(GIT binary patch data for the deleted tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png omitted)
z=4C*2xq2LczQ@H-tvOaHI2TW|e%*<6;Sd$?543m*;?G;iMllgRCOrngcA5?kU zZ^v8vfu!~|zn`D((~&5B$~j=0o^p(l#!dZ!{T`y*Z8&6KXex46r)95=)L`;Te{>3( ziE%jzaJc^6gBo=V?85H!HcKOVvSK{fTthI>@R>@+-8;70 ze$RxWjvs6;HKTt{$s};&X*qwL?{YL?nDcN$ox(||dcmUQa$=dH^trrPXs~hdp6~uI zR~*Ba;3t0+)cV&g`oTp94i4xViL>dYq@0w!7g3w0oVhrWs}y*1IjUhwKSXl+Bdz^h z%W(Ext6Pm=^y`YpLT^KJ5z|5qYLM5_+geg=mcs#h z${)ptedHE;c*W;*>+~>gleIl6xVNS8eqGp-g?TXzRbE!Gmrg?Oq8iP+-4ncEn*HWV zbrPNn1TvEaUd7V`HPH60y0qO7*$kn2S^0^s>VGyVoZJQT8xfaLKPDiF^l5?VSQz5T z{V2=!bxEm}%l+$~{efe>gZXw^qGZ9{olZgZ z*9l`VFJdRA!WhD|JHzQ=wAq{KOp^av&RN9{9|90)C>EqeVZZyC|G2PerUa}aGiq32 z=Xb4C2-Z^>pEyOjPwPuN9c<1(*T_ukHc@Q|Xl)uOLc;Kn2wbPbHCe(A=osx{GEG;} zYtyi`g0P0YI|}MWEAP9fTfBZO4y=l8f&1#nGPVbtw)7>Gzee+L6V&z07p|!HzHq&J z2GcOvyY>^PthvIPnmv6S_>Hv`!g&A2{?;}qcX+k{J5(!r(L3;OfJn7iTNge^rkHwx z!cw&grA}hN!yF?4im1={E zjF)6)e1F24T(#1wE}|d%vl8&zf0@=^STt8s56v&mc`Y$0=k#dkv&_ZK#9{XBJkgm4 z3*#3JU4^Gyfdi8Pp`y@{u}?e5sh3?-!~5<@%dw?b0i&QPK^#gfK&?jNvGS-U@vEnB zq|2HeGtS}Bsg5Zp9@|}!LyY?pkTKl785?m;e`Q&lck>b!-lFS#wSF2-HALHg8+R4v z2c{gu>S8sv+$eRu^V5tVx&k6lRA?*ZC-&B)QW`}IS4(GAg~7soRfXx{;CY-q6(m8r6W(qzkKBoPlt8G^d7@>zYx|Fx^8whA5LSD7@2Xven&fwp<3PBS@T&RZx*qO6WWpe!?S#~-k7&i%#}DEF6oNFE zNIBZLVR=j;eW?ES&o+8UmbS_6op;(v0N4?0ZnZAmETBMHj~(*4a7%t=^Un)P47r#gL^uefs|6y9;AH1(|jVV8clLnTg9;FUGtz4!z+R%QU*Y1#P#HzxkhB25>|&nAsW4IjOeo`9 z*%AXzDjDpYq~8*E@-0weKBh`k;Nw)dI>-i1E_BDv)D(2t-%J~!uJb8`bK02xvPWTo+6_*LB|AcT@Ijoi5dmqFn)OJ^vuv5cuXw-vuytDsODBS;iAa!&Xn;(!Exk z!im;>*-w#LzpWo+$2~n>+?MAH*<7ODTJhnkII)(p?%G^m=zl_+v5=V5bxj2p0196P z=|A3EpR{@ceiXqH?Wl6w?y({E_aZF~8ORth6?5{vk#3tOU+%aaRlZsN-0}aF`qL#A z@D>jHwBa5FIi>BTLoSDpUGyKB zOfA53d6qJRgg`C`urK+K1a+$Yp+AL#t=#3&gV%d>2dX1ti&P|TOOL3@{pO!_FZAbs z`r|E0eIa|*LtMI9wywn0PhndGbqX?Z7^g@r&U)xqJ^^MkmZ%XTMG&!O*b^GYqJe&0rJOxN zAM?4+){(yzY#k^iA*x=3z*<`(P1XFxjV#;Fbg!}hwzt=5Wxb3J=FUDWT1I&;2$b$F z)mf!6odszJ*N)Tf6Q*ta1SE4h$bi!|fgQTQ6$!SzJ;w7HWxE^LymD3ZJc?tN-bFt7 z>v|Vc`4<~ndsba6O4lu8i9b0>UFA@21N}TgK28_N-{pl{we$)yot|#B3!BrEy{!2N z9Ndr^p*>vJrUq6KHqa+X)3)040SV8Nnrh4!%YFz`kOZgyA)D6g@Bk%6P=Q6}?51x~+W=lIPQ)<#ty*QE#9G6XP_ zo`27dPVKFX@BQG0Vw-3Cw$Ij0o12Eh?QorL&&82&_G};^z9t#*{e%4AaODR9orQbI zY5#6ur=gdS`FLvw_p;^|LdDg1NEPKK** zNsv2Yx4PzFkF;!)%(vk#2lE;s7D95Bf$zbG;MUFaugDHvcfhC+@LdG(oA^EnHIl=td(Z4$0`_hCFdZOn z1~Lq){O^#vAQb?=ntz8b0%!#i4-8j40%(a36U+JM22-_{L9-o08DHd-NCo~BF!TN~ z8S^!1fI22nJ^>9!BkdM6kpvI7LX40Sjy~!p-D$i6rtMekz`As>2T79Yc)-%p zBzZHTJ%MoqzrY|}zE%U!Rj(N|ag9l7CMuPwo*$#}`4)7=C$mgcnRU!lJJ0L5OYC2Z z7dy$&AfdY6gb-ek!@tgeTtP*|qVfbnyv%#G_kvmfA$7q6qcesB5t|m5Xz}RkZ2>+S_Rfp%{ z9uXJKw)v{eA|)i8(--U}2J~@*Ji7^a+_eIq?$+&Eg+taw*16`Phnm3y);yLjc^d_X z@&=F{q>DuMgQ=VscZlTQ;h$V-GsenZ&sy0VX`h$TTeZ$%DlIokL!B#0v7W^_pW58( zhSP>5g-qy#!_Bbfq}wQ`^9vuuxBuiACqET)>dRC%9KNu*lNcP%>ALakxMLk!4kVeQ zdPSh=%0Jt*Uo%Y|QlHIy+^=|k)}N2?mr8u#WVYtw^LsOU3=;~c)lN4_-u96&o?Lq& z1{`1=ecaJV9j|s)-c6v~$o`>cy4F@~_PZ(A?r!?~mO2?^Q<_sJTC7u>)=B`p&cP_4 z)u${JO4~TsITpALM-O~H%vqbQ^zQ_0HcPS0J#oyFJ`L{K+ z`(^_EYPyo%)elCmD~LU5V7_lkq%Lhf!@oT&=G$)Dc`V;GNhPi6+k2D=yz+TRU(^@u zu4ap}@yM-^GQ9Sbg7$E66}2AQmVIc3Uw_Rz`ST?0&XOv-iM=r#6ivZhdwl6TG^fl& zT`u!$y}5y>|Ccnbc}xW%{p;CUo{G)ss%mM?&*kO)DFy8hyS)CDGb~hheHSx!n zL|1Cd9e%d$-`34Ro|v1%94DiseAl(VDWxyrKR4#3!832V(+DqY(kj*odfXB zcuan~Hj%8n_czhs0;hRFijtPSDyl^|AHRsPG>c8Pmv>)!Ed;PXbED6MhG&X*9t?DB zr<)zV3m{L@7ig1M@ir{l{n`*>dO=$t;oS6J4xQwx_qJuD*d)G_Fd8?8miL$AAPUxN z**AliMpF-k-mL`41L0@ml3iuhu4~_6RjeOi4xz3iTt@PIvr^p&TuysmJZW*1GSyiXTl%3f_x=bU=t(LSy(hzk zL_>s%pA;U%_D21rC94sS;I9_`OauGw(;-pr^+mF#AVP@fcz*Tw6!f8?Yh!7hJMjpSMOs8W^VK=AHD^q#`~LI2#c=V>J%E;Y38?8@w2^tGdHF|ytb zQdiy97SkgknP&b`WD!HG%rsK$fi1jp(eGZ?7u#3*4T+sqzC>d9T 
z+mSl;BJJkYLh3U3F#6pc6`B<0%c1$YIHPN{jUvA!gHGEgFkH#>!vuOvZ*~qLu4>;p zD^Na7EBlq+KI`lfN+F)TCX=f*!SuCsY(KS@8OS>!D!s|e2L+U@193*{>=&1y2Xh5fS5L@eBbLAe_ej#qd zO5Kh)W!r(4MrhJUvQX0)JG;kjAJnoZ&dMRRC93t1Xa%FQ>_$}hrBZ!En~AvJNs&g6 zkIm}gJQ)V)2-^P4<&LpH>;67J>1V(MdBO#4u}}qnC)P*ZbFf=@!Bk_0{NKXy4&{_f z8rr7&*e5n+phFOBH}1F6aobs8F!(YNzrS#_RpNInjym$FI>Rd{5W z%%Aw{{DJkF-bIntfI$sCMsV_P%}tl7Hf4)$essf2=cQ|cB1hE}z>h-Vy5|qV0CTK~ zbxh|PBMHPkaOLHN^~HYqcNXT=izDcAeuA$5*EJp5#hA4N!E#}3hWc_)BUvl7VZgaH z(Bb@Ohy+2qEVM)$e<>oYlXK((e|p8k19qMGmrk+oQf0OEA1rq8s_CHb2~DT_uMB+0)gY8xnN^9^Ge`y{Kw~7e zU2tZebNRq|-mm&`*~GEruA$kP+J}DEkDi|LXW60Ea+3VTR;=8VZc^dZ=3?0-Hzkce zR&pEb7XLe{r*ryn73&KfzO=mV>r-hWQ6&$3@kh0u7Xu_j*x!5GQ*R#2B-6-fl?U?lU0Z*phc4QQc8^r=vr}GKx$Sb=6=L~QD zILFJ9+l6v;mieP?^?S{M1nGYjTYCxWYp?c8`QT>R zNL8M?*N?OWYjjw$6`{4))h9W+N3>id(T?m8WbDNuXVjrw5;@H<%jyoTtQzcqDeyRx z(pI>oWZIrbhuxhgZn0ZcvQY+nfHO9=_awmg1*|^Jlpvty5TSiu8pH1*8rlG_&A9un zd<^viU+8t$ZeYkORxf|!qn7!_@cPm~EgHifob`7oq#ryFPgaw0x^Yxg^2-g+@TGh$ z+5GwAl2Tw(9J&#j2^?$vM^|MF*N(6pUbLnU?oNh1mi-C;TJ2WsM_9+|D0_o(^P=X% zN5rQnd5wptjKo}~;_FKL%6D_zC)aVy`Z~8&{>8^)<^nMeVG4WQ#wUn(DQP!j4Y;Zs zWLXAL0gH8XL0lpyZ3^FI8UO2)^{uP8bLeeuXkbbRMipex(w6bNCtqZ`rmC2HZOY{# z5Z{{U(=;uuP(k0f_(_^!=rB?W6-=#Q|Fuv0XSjKZqi+5?zR6ns^VdW6tQgMB^*P>H zjg+m>47Yn>Q>ti~r851!VGkR9SF~GoVYr!>FYOP*&Gtn9hb@C*J|psb$)Wp{!%)PI zpkcMi`D^`scIRwCCRV7@03L-FuYW-4q~+cIOUqT#@;r}bBfp$++epHq+Db;E&VzrC zjDSKR^2P){S{MVe4|vL^RQ3}CnRgk_1-DX7Ef&{&jQ09t=+*W-&}!Ivstn3f*^wbQ z{#qfd-vYH`&dI=G9~)A&JGC=r7m%iFbn+D!jSd9Z6BMEoILg&vx^6fY~FyMeuq0*?0L_u8(s5gyLd4Y?xtgwnxUdq?@D5{i>RbKK-Po zbYFq9y|ByK>vq&PubwiBadB|#V|_j$nqO?Xz4{!UARwtZkoj9lWr#eRlc|0fuUfXY zFPU~+$w_Oh$I%bkA}7rK#Fa}7B~$y!{Ph6M1N%(ySQIshX$>n`C&Lj`GV^%oQ-;?- z_}%mycD9+v*Eq~W3PM?K`*_8?d1cD`Tm|P@P9{R0r6prb1098AY&KgdA!{c;%`4Bo zS>B>W+vyj_Xs9Z<)x>m1wF!$>HJHB9Ei+{~OBD6LD32WYSn7|XpaZ#5SU*7NTspl& zNp4#zF+p;A{T_r6H^X^p7Nc;sA1Pdv)ET`#Iw|9M1ulCynF}}-rQ6k44eT~ahxIO7 zHVL#ldLXPW?KaGi^8q1?{;G?5p^dupCu~ypw3VmM{jWO&kZ(?FvF`W+a2ev;$Ib;% z8MV4fZRtMuUzD1Y@)weF=(5>9WS}(@|#HZ?^88n(5Q+MQ~SZ+^X zCWvE!Cfg?~n-xptwa3dTGVPlS$(K1l8sgcRP3OzHqt3uN9QE|t0PzJ+veLr%eeov3 zgaKUDP+9fbZ&ICzo~A^eZ7Z29G_AT?nMNC~@U>OI=eLa$C}Wg%LVISaSM|uT&y$*L&du{b9qwnMMdAmE zI-MEPkk?3etRQH!=*$|*&lg1aKZ85z+^~B+^reHs^YkHoQEoh=@9%iZxG}=N8+2X7mRk2W+ohXl3#RZ12$^9y`#`#xk{XG!FZd#Rr*+(>_sGRX%Q=EKe;Ac9vb=jl@$Y)b?89#w*51oa(mA&r zD~IX<#}|&Vnw6%SKI^BfLTTJPpM-4L6~~QK)7NX*O|*fCfG0y8wu zR=pKg5W8tM@wVtx*7;o<=Ve!%pUAdqGqIDn@PjyaaMR!P;!vUT$2doy-olu=mE0U5 zM~X)Q{f#9}lK--<03Rin;cq$gonwxViPoQ|2eK;nM!$Q35XZaTw5Fu`#vS6jC-WU_ zo@9G_DG|qAXd+x*)B(qDs`Q6OCNLb7Nr1%5b$=}eoqIdE^2e9wX)`(l2b@i+76UCe zv?U6pVr+Yj*;7Az9BSlmO{!doR$lpnxmW8ip?6mUEQ(||Cx%K>^J3iAbQw)XPS~sb z-ax@ow+S!add2iau(FUcIl>~}4(sNxqp=2SiJ@}q70sA!5_^DspuS(WUaLb+A6Y{L zv%`YY?YAaq6Z^Q=`c~X6)w?a|kFnKzML~){G!tiz4i?3WJuZ=XU!A_+LG9*IcWu!1 zfHIf|xj9xuJwpI|OAk519$47#a=4a#54FrtBJMgAnq-CzvncL8{JjOkdYjJMpd2Wo z`gGwV6EblMAaMl0aw0?WqRm#<`?r@qU8^=-I+%Ox-5cA%YO$qaoq~8x=bD@ zya^Z}pY|!Va8T{Jhki+1I&T0H-9=K^}>!dAwzc9rbj@duO5`?Kh#Ujmmd zuq4^`qAEzdepbpF%_LXA*EQ8|mByr{E#CzBv9`DPjW!gf^h;iiafXhVK~4T8)2ET1 z);Pg!uA@m9uyiZnTEA8`@9L}q3QetkT>o&KQpQbb`+%^?@~p;cFEJ-EG@r@JHkf?< zG4=?qV_e>hkkDgu08=~tnzAi$Zk$={O$gF=2Vw0{T(&+%Ev2yyU-eI0`s_}?Ve?IV zFCW!Joi{wSf0=KS_)le^$EDmiI7+rQ1iK#nJW@F65AZC>Tl{@1Gv5r@0o91RvgdVE zhIsj-*38Z-%BYy7{kt3LNpbl0`T^&wer5U z%eAd7&;gLpHWUb^ZVFo7whW&%TVo!;s@J za8U@iF?hoq#7^3RRvI;uzuf8=qs+WrG|50fPR)QQ-#lqX+VUa@ieL#y@%ySx>W7cF z`51u=z#@d^=Plx8w)@NPj*Wc&w=|1pcZ+v5s-Y}ENN3xMZG5Oqxl@B}G$v{FMVoEW zK8LLCseu}R@TN6N=6>JCED~^j(b5xcBI5jnBylG>i#^m3$i>+nVjH)Jo-{iu2X3%B z8h)2LsT;g36YPuz;)K>{>Y5ARRoRaYWU*!2mtdJ{9s5%UHZ)1rOsG(w6q^ezxxaDC 
zeblwEbs)B+(T#=MWw4ePh#s?Ocg$_d)Ki}7G1Mbr98nIq8Bb_usnob0!imuXO@I|l zR0~6bpIp<$4cQ(ls%m$1OJ7KidLE+_YzA!?R!=g}aWAa!?`f%b*V!JshJL@l{|?H2 zqIF)>7=9d2=Z)p96TMxwn8OwMW zXjgB@^}GV=a6XA^QU{k&dOSv%=1Q z!h5Db7AVw8mSvgcJM!4t#w?>ht{pG*t@D>T}>hMwj0)DtD0q(XlP@Cd|+&_51W`el=< zSEfG2s}8rqiW%!OjdJWPp1-}=Xy~DJKbu*fz0x)XslpNc5+aW;Sg%iC*uQt>I+CQQ8LW2;mqw4s;QnDDM3vZv}5HyBfn|9D5m5app&((>vDHR)u- zAgw9-E$%P$BjcS!1uu=~y&c+kJb!xMRJ%C3Z7|`Dd-e3W?7?k~V__U2I6wpA0^a8a z@N&b^`TJ(VJXn=_u0us6MwsGW84cL|RRLc7OC~QxHB5|2Soa=M>|}u;w}NS#J4Ak7 zNulS)C7n96_=f7p1fJuuKgM34c*V~DXVT^U-^0|O)V9#>Y)cXj>eZbi(2;~Yj9x}o z7Qn%I{M2YJJc3ybLP;E8@y~(9!s;Ugx_IxNn{i}24SsWnyV6Z z3O*?Wf(0+i8QOnKmbi+xSoXv>mp?CEU+-}@wo|d&2)>hY`LkCP`Py(=Q8~$XFu@%j+0;WUdW>VD~!u0?iASe!}4G=>Kx&sc>20`I}4O z+jT6`rLr;*J%I`Q)&hQ=H`VTFUlN2TS)+Y_PF)qO<9fm(8ktTtzm)sCoVy;Gixa2` z3{u=e$jlmi^8GDU;1>5+7?S^P_-{THYuS1}_bs-ci>gpAqPM{YJ)cY!PF*8qxOuL` zeKl0P>kK%%wunA^h!r<6jv?6SCulP=D%pmOyHB??DbXPDb`N&k9X?JxL4n)e1h`I( zqKX40i{OL|677{-4F%T)UYf#<%vM=0Lf!CPYGhfnJ!^sb4dcNRZvfk&yiMfrU%Fy_ zr*t4XBtqVTJ}$BynCv#A`n4iJ)>w&e_h5fQRba`8Z>Jd?ayS`b0vWl>a3@O=bSc2= zinr3xgL12d3iZ1P{z|hleZ5@aJ#3XP<}v@%*^3|P<($kS@wnS8@wIAF2`7lX8xy(c_%4G2@Wa>%$Y! zqo+ALa`5k7wuge??2>+~ucKZ_&?Qc=V~X1)o)AJwaDV8DDQFA2oLfN|Js}AOrv}x& zJFW0b$XLdfO;*y)4JL0^qE+IawYzbdghRi)PfB?dPo#FTh9uv+sy9iy&b)FX0pdqu z#SQr3)BsAmsMjF;s)S;&Md@`6*NN&fIs)$08HHsznX#XqakzVG@KB_CatG(G^%Ht` zap2ExM9>0m6>%wXCOA4crGf%b$fzc&w-|SwKumIv`ktFjUt?AY(eWM5wdd9=kLzie z+~YgLW#7g`?uuFMtLf5MahnTX&a6i8B)nkmlmC4PS<*nHEC!^ZVZ1@7j@3$ zm|FeNHCl%7?wlIRx%Ooj&1yg+qM^U6DF+A(k*1 zwjSG%$Yfm=7N^0b-&&q8W~4WCkGYd-!p<^84*WU2WmIL*$V=|1p{-}_6tcorP^5? zWQQr$GiWa#Uw28$(H;o`XfA^hcK+i+spXSuZ?B%(U*q~pCz0f7^wdf{8{ra^$eokm zl}fG0%=D`2VZ?URe#1yB^rVqbk>Ae*qO@}{9y!&h)Lu6;PeMm#1%MvtMgz012OdtT ziQ*55TejdP%XKAp+aB3wyIg%q!^^k_b|_V{iaPqnJQ?+HqM zcBe+->#-}Xu0fumdSOy=kuPOy<)aaeLmRdJqD_YjG>1PH!+|Wbn_YZ=w@hvw(yYXVm4;~ylu|^)RF)U-dTh4ST{WA8 zS5F?`tx-oGTYl1A3%1;TqNOylV60}%9ZKAF`J5qhL*wn%(=)wx`n_|CgTNIX7$Vw! zYqTyotW)&3YvG&`f}x?$DABXd;txHW)b2uo5gMBpt&}AHhqU*MYHEwZMe!)2D4-}J zAfN&wU3!P3)DU_Ip#~`d>AhDAD81KElok>Qy#)eFmo6oNKiNFxW{589XbQ#D(?uO2`#5e+v$lz9nnmsA|;Cej`y zFPU9t>CuxxJkZgJ{oj8(7QEd5y1OcNqTM3phsuWT3FRlW-;;lqqo2CFHX8$xk=WZh zr0SXH`>_Wgz2OFTUyvHYmR@zSS7{|_ud(|JKRGKNL(c^_nSi5Zh+h-RRyPap1%Gxp z*Km~#<&$2oa(r<3;^HOE4+IrC7{l}Id`c}Q-C*F$WP)m3Jh3*uN6GiVjmoedvv3B7 zX=c26UCJx+T2!Es3OLGb8e92?j`jnDa<{7kqDJg|owxC^GY0SLg@ml!q?8=6lhxXJ zw+xmyx)(0!qAhd{vwmqKTh6-Q~r2$8o!TuA_DZ_BwlV`WpvZfm z$$(|=4K!qrm))x$@7dLVhCHG8B@p4D>mYL(`zSPF=3N-F@TvW8e6Z}^BGZo>#niJm zMgL$f{Y$u)gqppf?EY|QOM#{!cP1eSvy#_qMI)Jsx^ux6H+L-)8h!B_BlElVzbG|@ z9QD}GoZTRTLGZ=9_b-A1tc)5q-9F-bLS5G1_)dEoe78pCUrl8qK5YId{ovb1u7(r2 zb6`kf=ik6*kG+gY6bn7!llD#KA?$aVU54#fRaCW2e?51M4V~;(`P^HVYpLGBGH>2Nfv;8%>Z7yQZDb2?+l=;lvAGPphyj84irhoI!E=4~t1zR+$ z%(1G-SdWHn^RGOMKep8yv)P_adx8d7*Ew33mzvpQ2mZj>{14NX4b)*&OA&*h^JjG;9p(>fAcf8{%c=pp$tDhx@*r~c!uJa%>lE( zKm0Tuz3SAfa#N*e*rVm1be9mE}@T0pb-6or}VF%Er$w0iu zVILOHIph0td*&~@pR-T&U$G02oRD-Uw*7caB%nNK|8vFWRi~z=IjG%W^CQcBAtH zQ?U0Y?p_B@8F4+%X<6-ywJp-f+u3_!aOSbAI^~jEOr6}*y`k91_}@UCtiftLH~5^- zz>p9i?>J-*b8m~MQOVfI)TRm?FHJm|F|tOahrYS1Ae+G(Mz_)^xxBS%a+7^KErfCC zNtA?o$2V_=BqBD_C|^#KdrzU$3;ClGJ+hHKz~Ae|{U4AG#3@Gn!wd zUtFQ+*>9)Fv6@|0Sk~O6fc(#|;V=(V%bhH!4LhRO6hXaKLna(ca6d#78LE9s9{9`vF3YtV3vMk4aYr1B3hiqe&rZfdO3#Y}_ znWPJTnQId#tGL7Dy7U-JbC>Qqh;py(iGSGykuf)@DSp0k$e`Chb3rcno@3+!E&k#B zt^ZcClQrtd4|t575eOKhV>GXB8SOtE3-eu+t$0L1AtEC`ixBbsUbx~gO{FdBx)c(o zYv*c&lzXGl-LztIdArSSy_FRQRCgKiEDvfvzZ593*uY@IxaV@ zY3^qnwzz=#8(6}hWN&)q<{CErd>{r{x$AbH?4@=~6L~v}$yN6h#;iVR;~i#wda)+V zeir0a)G5BGzx1L~U|fL;xE9^?|8eQz%Dri(n=o#E#CYw$zo|X5#jBC4_lUq99{r3! 
z`gn$0RlOph%EW9L>C?&$`-Y^R*``}B=2hOc@t_~5jaqJ-?aeuB1V$MzHC)Y$46SnFmo{LJ?J;{!w^B_J^K5^!eSL6PSO+4~O-p*KJsnd%qq2n>bKW5aNrY z)5Q$0yu%<>DcJA+H>Jn^&fB1ch+#|ABMufP-MxxORIP0MhE#qMA=L(giX-EQkkc=Z zh=(o8ew5_@i{VwGy)LY{k0dHoOY!?(vRQ|_?9Jrdd55qk@}-V%eW;=Tb=w=WT4E%= zrZ56s_6>dWcwS*=(utCSc7c;A`E!=1E3s5X*5u=R9$r5l{!$7*iSkF2!R5nEp>Zqh zd-Z!1_vRVM7=arXRqO5BdvfG4D(2xvS5$6~|GT{<+1nt>|Mz22b8FwVy{;by*Mj7R zs&%i;QyZ1hQd~$OXu0n}sf(#81j$FSiT3ONQc%1j+lGQdjP}2a?PH350vWs%jbF%H z*Flzt|GBv!~_ZvTCz_OR>wb*}}a{ zu0iyR)!%j7kp7hZbB$pZ(UPKImzNzMh+j`ptLlgVxBN_j<19^k}qbADD`51w(~83JcRCM_6BCK}tD{tpjFY-_S<_ix1`fc2{L9 zT#M>=jQ9_)9h~C?y;t+iw5uC%lbvejdI`-KRC!=ZnoIhEUL#p;Ygf#Mc<0AC=U+#` z-dXxNQKwTgbmrjHl+T+jPTAwL)eOMnR6382$bZ@yMJ^C*vOSgZa$rFT`fJd8+eJK= zi~ZX!o@;VdsfojE7@o;nV6VFH#DdT&Bf2m6d;PRmQ?bm^GBeqErTR3 z+=_l!U_USi%gLmuZa5UUB($tJvs#Q$eeLoVZJhJxGRDpkqB+!b-Cu0X-;xu{C4tNg z9&^y}xrE~|nYK9o1-6sxo#5xa#N7tqkd>Wumz{GE6Een>+QXA(%bCZ~82pAk^>0ZphQhiXyU^}FQ1yu+P`EeO$%_Ee>(^(9aesld;F3% zam&N(VJ>4O)XT81P$1Q2%;SJ}%Rt*0E9JyrySfv5#*gaVu%?^gtyV?Ve@?Y2U`vqL zF3iafrW6>TkmiN%%pBBS1j&a{44uVJMVG_!AVlezo3d zQ-x>FP70gNZLdelIA@~thRPSx-8Id$4LUTQqg_((LrMf zkHD4%T|c}6NiRh(&5VVRdj6#gUGD=;Uw^7c;#x5r*9Ys3=uYtpLjXmQyz<wJI&VC(Dwj^T-q@YhJpo;QHH&Nj%)T)zfGdcF$&^fAB=4=Kes(M!6|97+{Xh$ zA9eGy-9Ah+RV!3LDPK%^se`nV#m^56=+9d+dsz_Rqt^6tNPBDCpoouRVmuQ;Pe(bH zM<7f(bj!r9Sb&^yXu*G{RO`OGtG9Gc$`19F1F8r>r^?sW&vM_7UIW$P8^=2&>f(xk z8d>n^V&gK_`41kRbtxM!`}>#s7jd{e!*;`sgwk4l4dQFbWuV~hffXVH)r_!2N~pJ~@L{s6SKe64_&EL|%X4qa$ae1o znMNzi{)G=0A7IqG!EXQ(p%xy4y{aU{cZn$R72z|VZ|rp2S+8VDLuMRpkDFJ!96>=G zy=tRF&~;{d9+-gDsL|0lXW1EU_7AU861}-WJ-kT6+L^Ol#7l{r#=Oa3c!w*IZsc`| zs{0*)chXN=h7$w!H|@eP*X`MaXaJE=8H&?dt1Wd^ze?LNihTw=qwl zabKGQs5A2c8l`9R_!8^>oqY+Xf9StvC%JM51pqxXD)A}pzyRiu{%K>P5h^ z^h7Pk!rD($u&s~I(BmA^6{~{I?qD}o8>071Z1{#0pluwjppou8FkOAfSH(N zo9txIk&wqA(YPG0)MumwfM+0}-*$dXn<-U*aVvrH{AkOpuhotz-CqCiI|kp!0m&{( z-(#6pTb|Q0JSi3W!xt{EW;dG4=V|OM!kK0C$Hy_&ocV#mC&+Z)S^}+aegH3<*wovJ z2DKD^LR#$3_0;Sle-Dm6VNhw(``O}A=N2$6F1=xWd}05O z9g}VIy*Jofwe70MSWf9NG{;G8(=>;FIxD;LTlVyY?zyWJ^E5Oio_t0OPR7mD*JG=6C3!QnK{4C7+s~AbESuJZ^pEU3&;UT6hEX zuugfkn+NKy{`Lyo=J01%gHTgydEb?HV!OD(@(JD{(y4ai=_hyijnaE3hml`YWyZi* zn^5iD@8K_Uy2X7h%{(D@^@a%+kVn$?6147prsJGyMcv=Gz2r?)HxLo+-dKS$ze4jKHto_!K$mCF>eLm z#rk|O459E;wpKm|7)xl*q zK_B;JIs1^H8w_A4_E{X?45YsTgW3B4-LZeO`s_uo9C=BFhYuA7 z{OMNQ#@=BfA~tQ`ZlC0cIjtGs7tK~hojQdS|1nCFy!%n-z{{O?%}n>jjB2qgLRJW~ z|N5)N=c-L|wm3e{U77R<2HOd~7P$1b{E}?#*B`JxGqJ4kUe{ttAVLg)%%Ud1fW zNDGb%^_bTI94%SEx6?uB)v6fFRmoS`6X89Nn=>@H#WH`q*jaDx?-{O{w8R@8xp|xv z;3ss*^;77iWfeMpTaphhb+n%^Wgs<2!gJff1APXqOtPV{1`9ifq-pC0;){Xb+zNrP z(-YD+*(V^d=ew#=|Ku^ny{c*N4@6dRXYkewtjp`r;9bJ~w)o$O)Pp;EQPoY2|{zbafa9 z2)F(HYZ~xXUIut^BD_q4MHr@F{m}Q0aj&=1i4~NyQwD}T3cF*CQD=TYDdcU|FLf05 zl-U)wa-lzaFVsl3962G*#!BoVN3P^PAHDa$bPHPChS2nebo=*2zjuNzddZju)nyh? 
zlbm>gY_fAV{yn=80iEGwQj3c0*6y+NR)0eP=p=4uj=6%HQxwO{j+RRY>dLyZfi2Lg zrxFS;)6n6ALN#EF)o2TH*&T$VAfOtm0h@6n96`WeCqk6s%0hgaW^ z)=p$(^=Vegr(`+!aM;b)4_p~MO7dw!IT79rVTiE|VTac*bo>8mxqhWN;KK4+#8FLr zD+39~$k=Po*Gb;7O1rzL*90GZjP5eg(qYc#iQ+r-HDu0CUOvD%ZQg49UFjq3!`gCN zaYfXX;~w|p>|~-xy}lv`(sLGLtYgS5vqt2kq%Xq5w7R`wco21_e2C6E<|tbBeX^qcL8By*=G$ zaXC}|b`pXtezfF$fB4%TDhhoq|C-Z+-C)Op-|~~H4S~_GG~tHPPc~&L0FvFMX?e>w z1xLNVJf|AkZQaM*-@4Z?f>T+9Cb2JMy+L|!ki`iwSdE3abibKKg^#IYl|{PMrYkEh zZj{=IA;mWr1K2jq(z0L6gULHD-X6zUDQ)ySwFoj=Jo0Rknut1&-zsZGj+%(y$pOn) zezQg{I=h_e7P;PVc_~8*GiJe0Gv;=)z)>I%v55wsXF0{zvMTWk@u1BZV>Wjlj^JC* zt~7^EWWSwBR&Euok01iOlM#T3R*y|Nv%MH?4P^xwlVkg;z(}VwEoUj`f=PGzC3W)aRNg*1 zg+ARj8iaY_G*0bVGN=%3+8?4M&oWsJ0Le1w19c~ij=ZDPk^TToh~T#@jRR*XRf4k| zzC!(N4R1O1pfWLqGn%OvqW~wvj!e*Hc&Z5s&)^axSXq|{@Luz6c?fg>t?ZEM=;7GeLeDHuJ= z`o**2;as1->i`mN6OyA%H|9M9^1irj@astiifU1z3+5;A=jYWQ4CI8j7nfl=ZFAnf zb?8{v_SN!`$T>~rC1LUIhgU=Drl~hgpX>EzJHC^f3_eW$ti)uM5c&V$AGi1EuH`;1 zi)>5eoAi7b>iE6`eLHWMD(-V%x8&NihsfHaCcQ{LmZpW`xv~yIi^&(ej}V*`*^*^* z`!j9R*AsWz*HtSvx(~8B9k<2jT$-M5mLN*|woJ6N+ya`~ks z2$&MISiHlZf$AYAwd~%kW)1jrNaeY7;?+=?>Ar)~0mz$ENiUp;#21UwJ-};*PhFoO z)%gKtdo!u<3T@_joyvI~byF;LGYMyF6rkgOF9Us2+`j4$hrVC{y8ru?Db(5>3Ypcf zNFZ5$f~%>j6&=$hTN;3sTA4Ce2GpduyoGd+9btFyE%zqJNmm zS(RJvEL>IMO?pja*>cP33MPotgwUOB`^b=LpGdNlr2a0;Va%JdI?rrj-|1=4deOah zBfsqLQS@pv++6(9@oTEaJsIvUz_V&Snn0EhVAnPM^Ou`rDHERp0<&7r?om!FB>vR! ziSeTLA#olu(68CybT$O|tZ_ZYuF@Y=T(_k)(35%zmG=EC-ixL>Z9IQYSQ5I z-@PY*=p$lBxUf5U>#@|dWg$%uK>r(#miYTQgY8 zrC(Iy;R9Y8uXJ3nS5pM|Y-}8ci|~q$&Wqf!S#yCnpBq%UXBoZr4CGIruzxr?B;38j zHuuEIy;+?UO)C!!igLi_{24olDCTAM;O`khmsO4!hdn=S8W4>ViwGA#J#H#rt$6;` zgS&T#;DFVjsqr{m9HrliQxG&5Rs2L;Fd&0JA2oWj+=^MNX6s z%Bw44-mkQ1ae^<$_vp7_!i%a@4+rxQt?vSGYEY_0Wt1zYz0WjF`mzyU3*L10i*M<$ z%*#=^SJ9>w!$Vy;TMx-di7DpOIlrNiXVjt{BrBGh{j&@}-IasKw)h%OOe#jX!N0KZ z5e#ELCj>f8R`fLK!0RWAJPPGW9DEeeTM+{)+!el(dm^Xx452Q*naP!!Dn_GJC8(TbkcdRx3tRfY$Us_MevB_@}?1sF-lqk)#vwA;g8vq zVd|WqrUYA zSA`xuBIWvdZkg-s!*&zdT`t+qE=AQytR1%BDL%UXhU1GZ6BVQf?o!;U``=p~D z4;7WukC#LDE?)57eCtl*oP-gGGQCq}d~nFVu8koYQfB zZjGk~pc-lVrz(%TbN>g_ULO*kpg?S^nF5mO|z(ypSw^wi!E-t?DZ_8CS1 z7F;|W^~GGZ;%Cs;Xh_xz1-t%izL{T-K74YRojw~ME;N@c!6G2F>0&K3OW{>qrXlaA zu?8zLmEmG%Fw?Zuh*gtWjlYrp&F5uWVhwl^JZ-e%TjvqIi;{_>XP>-|<`BjjQP|35 z4Mnc|1+R6*+L=LkEpSNry2r?1Y00-7PoqGGL4)H`tZ_*%A(1+{Fkn|@;2E1oWrr~>ZoyHMRPEkDCX6vgp<0B=|b%YVy0t_j>v&O9wN zgU|I!@-g1dHdzly|Jl5y{TY47#8DNlCe*Cj@}5r%YQL!K7d=8FpT?2a?&8A@_bGp= zn){+3k{XZ_&{|D$HcXC8mG}cwyID+$M`pNZ7djU3H=hP0Z7p9bcYXk zW{lyVkM?D)aOJ0Ss#Mf{*3Vye<;J{HR5gJm6=ZP<;FRjRp<_ju;N(^=zjKS9{ zgoe(_+;u?ajJ;{VL2a#9dHb7xJ+rZtn+92}bCC-5_~$x=@OGY5r{2)5M%5&DsQwkK zy9tk9}4hyhKSw4)BLPj3sXn zXcIj~fRe%C%jyp`Zye06`r%V8o#gs+8ymfa>dg70KaL~5R9ElMnX^8e@j=LE~pHbIGokQ&oP{ zo4$u9O&dk}h(7pxg|v$PVOGK|EVM&RI*(-S-kk%@8IZjPy?(A ze%O=VvE&k$+g)xc=-BDZX4X-BkILi7yh<87m?Fk{L1WPdLCFIc1OtQBkjtRWUYFxc zV$o5$TOzcStSTuQOO<7Qu{bD_{@B?&v|&Mf_mu#OwQ0z?uvA`wqFk`Al)f{uxASHUcD8*?X>qeFr_r=#4>#HDq0Y>hz&ES(OJ%0lTr;F3 z2HnJBZjIV9v+y;~)DFRHwWdb2POl4mGweHiuygNwe{&|v9T@6}n9$Y_!jD78`k5aX zM>(H%Z=X3ZZKhE?yV2`g6Z9-g#k3lCRQy*v!kQ+qSumx7=8;xiw`pfwJ0jbepi>2Z)z5&%S%hD#E^p)Aq{zU#OGA~=9j1|D&CO8ue$1G-&>^^!==#H)(w|~YpAprPYU?s^OjNUuYKTZ zf%=HG7wSpn-ne#cr5;B{L05+ z%V}J{H6O#;LipYGmKZ+neE_sEzl$vas zGO;`MD6zmvcMh{cx@ebRAaJyl(VtJm{ca-sgkM-IHRUpHD|lRrf@ z>W^Ja^c;}A9_!{KCP@K@E!84E{Glr?sGQ%(b@2TtUi8|Q?UqI;v||)S=8o<@XBqrH zmU(z2;asnAJ-vELXzz|we6tHC5phwK-H{am(DG*NYM+P7b;LtjY)RV{8xd=|e zJ06}dg~GzK@-jB-5Byfl|3g3D`R8<{+Fi&!WloZIwyeFoPZXnJTg6^p^Y5;09mCE$ zFdf)7iubt4wzr#fvA|-};99wR5{{mZSrH?Z&dQloOuVN=!&&%kPT4Tt&4VCDGy-R1pAQ=69 
zG?ADeJz8oq3_yn7TN9(Sv)|5^N>knEz#93P+Oq&Qxvx{(*Gp2C><%=fS*DNsV_@hl z6Su+%uga`+vCnA40Xt}LGz~?o?3`lKwI`G6G^1l(5Y6{!K$Yv^NYag_dvk_X3<8;k zJiITqA38qaa)`M=_32?^!-unY6&QRI^3Q=ru?zf8%4_+q`%lXM)bqRnZ%QYn>7N8_tzu zy|bpi3uSMNd8FoY3UmNjk|tm8pI8CQpKH6n+tQ|YyNnd48_6~D`3;& z`d%X^CiN!K%9W*e>z$y@c;Bna_`!{IaOJRrJ`iF)eb4+mLOhDKKl0$t*dx~|2{7Rh z!Ks}MGHlwu({^r9X%6y6;hICfR|jWpCJjtEgX-4(waPesh{0uP&PRpzd2NlYrMd`g z6rg4GD>O3JxcMD@i^WjxXzqRAPISUdKoKg=;GNFYlwwd?hb=5SDPmC8srGisAlHEG zF3+Zwe;id->lc3w4RfEtidqn|t&k+)GI6E<85;g_w|zPjHEG@(9CsuFEO4!mkLFP6 zQOPxZ!7-@#z|DHE(bl#JN?T z+qbrhM+-etOi)6f`b?Cki&5`V2Y)-(6Wps#1TIov)LXZa&Z~IxRCqjLsadG3gVPNd zU-fF$_LQ#5O+%GuF!kl$PLs(-r*@Muhvab?^%4?q>aX&iE(ziKk~11Dix!c^40nZs zEo7&}V2MKt>V5wWA0MSVu4?IL>#kv@f4oIhlcHw>jY4DN(_^!50`M8a#8A^m>(chO zk7w^nK&@+Ykr7#3e~t5tmPuV^$$g?rgY>I00P6};OQhraRe%D@31aHBev?OCdkDTY zpyk@2GYWnX_#Hnz@q%t8_dp|9zeZEU)ix6)!*}rbZveYmueh)A7mj^y*kF{&!_~CV ze2v)l-PFXWAa+OSYz$1uA(GL6nQIeZM=I712v@k2gb7i``dgq9PRTx#W#+$H_SD}s zY3Q+qf=cE-B?gR&bZo}Yr>r#1vMtJvPCZM1*C+J*Y#{Eb3t-_WC3@qStSx9~JIR%r z$^v1~*^Z0BV(32ag5PSTp1`{8)P;sLPjQWz9d3sG`{Eeg>&@vqAD1saVZTAVufk4@ z{fSA#1%2k12&HessF|J0zulhfgL=j$NoV22vIy;stAm>Jg9Z5+t*jPQ{8RKb^QCLp zBIG{;n--pIjwlMYZlbj;2@pI^!#;9~ph=!N1fJ_-34!2Oi%r@P7fzZz8Ep5CjH{&8 zZl7~>Jn=g&=&i8@SyRSh4HcmHAsjOb$(kvpY|0rD6#qbV@SnUqb-{{|uR$%J~on}n^B8_zq=|L;rWd~$U@6C&h`v@X*}rvw89i=C#G&2{#e zW}}_U=WfT!NmJh18z#Ev5uJN4XYmH!V^jbxz>xHq^cvqj)^o^UcxzG9SBrUK#ShSW z%UfrxdnkNa6@-&twi2B&f{ZcQb!(XCoPw)_TpF%QvRG3JSQHL&S7>o)KBrXDP5IE% zh$gp>mz0`-+k=qnIoFB zDPz~P@35Z~i2Jc{xVLfTk@soQyldOX8Q~TvW!)SkvhJ64aM4;P)CJ}4shj#W1dvq= z_*bufs;9jFB5P93>*W4S(4XI9H4>C_f+zpF!wTau=XLu8E*6Q4<&zm?K(Ws?`Rd!W zYu;M2T#d6~p+1oY?;K7+;Wr6Ter*tPY3m+d{bgxQb=y*)lijLL?LqkO znqrCbg9C$e=FLoM0y>d~?oeI~@1}n);BPS|K+gTF=H-co;;comW@u!iDHg@oygTSU zd3M(we6XBmvvhrh%IA!%8)ZpJ-A`ba8!9z{r53Zir`hxk+Wu%00-E(G@8eWb6(Vs0 zs*lfLm`w{o@(Fu`Avnj6XK>&dAOvr6@{Ml5HmK)G^ zfJ+FT{bk@iBTf2g*A?GLw5G<0uy_iOKl!=hlPV{AILJI_g1UHho4sd~jU-pgJZzW2 zr7)q>ZQEu%*z!!?I&Ij*d82Up*7r!$F^0N3Ko~VGKi&BhcFqj<8w0o8wzwKXCiL>9m-W^-L%a`vv%6s+IYH3r4XEuZG)a=x8%OMCJn(OTkRgkd zUl!pZq16hIl}9|b+geRNQNxYW<)#~pEwvh#t=RM1%NuY=4KaA^i~foWZ_k4;$g0uWZYbRFxkjdu4f%mY#1pV{3{7GUepgOcU*cD zdq1^E^99RvZ;P9xO|gUD^)%`xe(-z8g5c2BDMXwV|S=TNwLAfPiJ)y z(FvR|FdkU!z;jTm7Xjvs9Q5241u-ag?keF7V!1R|vhy8Iad+IJ6_6$({%=LMCj>W8 zwVq#cV0>kB^gSl^s`b*`BJeqUX$Pg}#Mgz;n2rFKe*HI7m-;>)0Ww5<1al z@xqv`=LOOU{Be3RYLWVO$}(Gzc9DSEM51!$&3@(7QD%}a4N z@zB-)i`;nf*9hiuJn}R^&sd!*VPvpBLOHI#P#EG&P~QQyp!54%hdqF15e`!(tPuGj z6QB!=J?i(1I=oEe40o@c_zAK*TKI_B=8pKI)1M;#-=iK!`tTmTf(UQ(nilhFrPrTi zkJ-0)rA;rDl_`wkc|N&C8DJnTXg*^e%q0GXc&H<|%Wt9{KQ)$=I!IqOWmFhnYc#vP z)Xx`HCFVsmKQCm(?~NL%DsF2O5*j&%RA1r<*{xWd)7{T=5oEp0Y(u)R#eMYSq%z59 z6g-!PJNnvWP?LIkf_f^|+Gr8o&#~mH<(2K}VmgX`lSjq8J9}N;0bMHzszfy48`{|( zV%gwBLoQ<^DgcPBHrQmfazwpI^KDt1Hd=z9gdLSj#mcoZvap*TJ~*Jh@X1-)NJ%$LyRYI#Vvf_~_A(M(Pg8$aGI#aTJe=-1@$LsFug)JFpjh2Gu~t5;H`|TDN}3d;|SynotaV%D_)j2 z&;@XjFdFso59zzy*p^Z0XHzeeZGUiGnpxc=#1qlF!y)F(N7kp$d3m5Y$br(n@Vm_p zL{Mp8dw({ky$O>oSi&YTIAsm>3H6bwdq0FgSRF;Y#N|A%?=uJIuBAz=GI)aZt4Ra( zs{JoY12IVa;rFg>Pd;q%ox!@SX0uy05b*G*tv{!^CE;{2)^+U}77bY`?fE9PG|`q6 zyOagAeglc|E2in380_Q&Bz>#W`Ln~g*6~D$>Ci4RDAOgb={&A{vuOtLb7#I3^OApBpTnuGSt0ToOJt9s)!r0}okBBD;hPqsOyfx`S?=;z99FyS8EhyLHa#;J$u)1>-BqB=Oma;ejJDEU@u zQw46oZt?`n3%744KN^6QFnL`0$*7Au+{WTK*cneWHumS!Ix{HLTWI6;;XTMwS!-`9 z=6?i$_<{F}koL?1x2x>WrjFF2<^Z&eG6or_mOs_cLga-_bx{&P$ixZ!uo#NFDM+_BX2BBMm9zSKuQF${ zNKs27M*d)1*H>6onE%f4w=5kiC&rap1{(-FiYH8)XySGoN=E~2_zCTBc zdJcG;g{rV0&Xst{vI%gNUs#lFTimy!eRbGYw)S&mPtO=2`nzad-qvzZ>`FL3At#?H z7L}BuU5wU1@dlO>(Kzdw=a|UqsU6jmd70W)wzMFoNyih&c`d)MabEaMTgJxe1K*$& 
zd!$IgI}{<4fPnkl&$RLEPmfhvoo#+OCUS`8>M9B$`g|$1P;>1dzg#5i;!1gW%>v(9 z9M@KRo^^ht>2f=$@&q4?yph|(%H1vXdy6|oIXzlS!=MbVPcfCJq4m{ePYoC%MQEl> zvQfTSO_Ud{B>vYZq`}Eq#f(#$y!dJr*r%ci0w$HAP(tKtFS+Qk(zEE6lH*3I!VoQzwCjvR{ioT0KtXtB-$8j@7yN({i}Eg6 zt*hJn`HnS5Q%MlTsN}8yVUADu039M#!kg~6t;+Ex(~kaUE=GOv z$BE}dXHNs1KPhM#R-g`}SZvGEJP&UW{^SorH?otlqRH9h*iuq9LGL1Cs2`)9n(BiY zc-CJq+7`2z3BL<(!YcIR0L-h=KbZ#SN)E;vN>$y1B;Orx(%#2)IBkeTi3Ll^A>R*% zG)pBKw^OCt8R|BRHy+39^g%@rnx|T)#xA|c@P)$dqU(j;w`>pJovl0l0fV(K_vD5Y z6(R7~nSa@HlhKW+;tt2^EvLZU^&jf%3+>-?!2`;{SSM!YMP#{bfDK&%Pl$#0G8-3weKf0_jq zu_iA%rF_geu;AdT?tMf^osqC%?4P|-iB!~8E&FUZqzuC~S#0a~odc0wWpILSfIyw# z+k$_VXdN1qQMcvOB@qMXK_($uFP)K#=6(}>UGiK^v_&KM+)2loThl|v28TJ#b%TfM zg2mO@DItT-q6E(B?Nq0uBKtW{mNjgIL4AE3+XI0pbloB$;%amb8iAqSd|vKv zZIM5qcqLdqw{bHv!g)mWBFc$0St2xQv;ybd*)5x$ppN5sVHsTZjkAp>3c4BM??xn? z4;Tml4Hx6NO2oh|hAnG3WuvbrSZ^~XYCCFqyhj+uHLZ$hR7M*3Z%4&llm}6Q>dsI~ zSKeP*5c584SR3Cv@HIUi?ye!2fzV_T{YWG>=6LeOGre{z-i{Y`P7kHok5$f)9(Low z#k}a?To8}*&qDLVn*G<%tC!z3Ch_vsqaP2Jzz5^eeT*A0%7+eRx12@Y^HoBcr3soL z%{Epa>$SjI16m-Ml9m*U$XKO>)A5_1D}&^8q$&?3o`0stMJO64gKOW7MTL)+PB~Qk z5BAPCD5G0qGsKAVR28Lg+=Bbm={)2q>KZp@VctDAIcq0qN2^ z0YVQop%apu{Xh5ReV+5;o_p@O_xxUGWj$-nIoDWojWNETuh??YiIrH)Y4cp!lymk* zujtjZ?TKA068w?VKe2u7ATyE3jd$9Kk$`$mU>&l0zf9+--$L2+H8_1{+{DG;PLj2@ z7XRdMnk0$zopRmnCx6*P7eQN8dDy%$*T~r6&X}@VkgK4Q1EaJK9%^){4;Wf3iMZoaW{YW>S?QfNbmE~4K{m~DSUs4`FjPYInY8IV+4wM$@jALrXS&z7 zCnB#+%AeB5P`=lRGlYe--cv-gIOs#vd%LWS)TBB{im#(_Q;7qX>y&Hey=r?-3trrq z)&n#C5+doRn>g&j3PNLH$@FVD4(nD4E{C7~h33;GiIFuT#jVAlv0^k8i{56ze+aCyrQL^;9ygi+oeaF=xiOfQE-XE3e)4)Y9MJiiJhlN~5Y zqCQqoc;F@lCjIl|(njZ7VBJ^NFZ9{+@&_l|bkSVeeTymTl0U+RUk19NM?OEjTM2hK zg0I~8H8&hZtLG)M#P1|fNQ>K>&OUIHChAdL9$rz%xGhG7;N+Hp;Db1w4KMeQ*OePL zRT4<<8^An&QzS?n1GkhN^j=Bb{glam@-R~%b&0O z02lr4#C+y=&LQqWiN4^sQaMzu|D{_cO00c*+Kd4o|5d*Gf&YbL0q~jw{}R&M9uRx* z0J02NF5Awm)-BUy6MVvY`0~~RTrOqW7Uhqn{Fs02>vSLudwUSiskq4(GvNyAycl+rZ7$#N;`MA7)Ep`V*bX!L5>d8Q>{4i1KYh%K`xvs zCP7F~i`w#d#`M~0=6adP-InRuQ^pvhk!w=Us>2#b@ zrD9w-JZ_{h)O$$DG^vmFo_$Y_Fmp3!+z-oUJSCr$_P=hliqhUx%$ zNZc6Ozpl_jkoUZdY;?wJ>lzy`TK|QWh+BV;W{O)E6UzVa#am1c zB*Gvc@Z_XSxG<##HuK`qyo8e0J+j4IpW2IEDiXfKvo`o{jjdZmklqY?VC($#62ZeZ z-4n_qd^UkR*0AMYA~nT#)8`cU+IbG6TII(bu1rV7NL5l_?T(J6>vErBL%(b7_{`)E zf^+6yg<{`emU+zkx5;fAFX9q84%drBwV=^yb*avUd%<(#8WA~-x9pR`v5w+q~>mQt*qK<>+(>EWz`Yob4u_|#L;V_7ML}L=ytto zFwECh6jEr@D1H6d7)JPIuWvMIhpMS!&t0TQ(7!jG=M{Od%_hhd<03J>ph+ll-t*$m zGeg5;Rtr27c;!;QXn6AsmKMi=Ea}``aw!oU&()i<=N%CRymgrayiPEl*I~0|C#?p0 zG%fnvJF!!F&K!xuq|ZTbgH7Mky$3lKq}z;b&%Yjf)S$tekz~!DV+y21%zPq$z$cMI ztX!$^vH)IFbFVoS=`NIoRjVVG>vX;Qs1?O|Z^f!Yu-ch7TXt;Fh=Xsx)mMOHHJ575tx2-McXT_L&YbpZ;w3kD^#2r< zRy*vTZgcd{zux;0S+P~dl(Eq=>7SPUqpDY|&PmwTQVF79q|GUR+?=T zs_4Jk@TvPs*YLg~VPf7?d2HaXhqQLZYrME13L@g00R=&GRBk)phuF6Bw`_~Zb|4YSq;V+;)h<&B8DGn{V4{ z!t-wp#ac5F)ofy*vF~_AZFN^U2bWFhO5=NDrLDn=Yubf7y!$#jW<21pz1!{rO{q=b zy2pQ@E9>=4kji)` zsAwEUdCZsf)TSELwz!?CrdKG9D&5GYL;n4anV(;L`gs{%f3h{L-dXvr-*dOJWpngD z+<}X=(Px3c>goAHL>9-Xj~X-`F4d_eCHO#V@B<(H_qmgqXWAYy*}9k0viqX+aWV3q^vC;m-)5-A8g3?<<Nop>tUv0QyG(&Tv4@Q3uXB?*ufJ#-|(M`dmQ ztQzFM#ZctSknv0HVbMcyZOJf9%jx}uEQr65$!BINXT)#Tl+Vd`=$>klHRz+FRa$9e zMxl5dEw2P}Hl;-STjAyGvr1VB#`^SzcGV7FdIN!@yZ}b~aVo2tfMgdP`mIiO z|CV#Oohe<%{AzWL{FO{LF(-a%_T8940$&GR-#?4u*Frb(X!`CWxb&PCSp8~#{d!pJ z(+j+zS?eB4VF+e<`R(Q2?=^NHhj-O&lx|Cfr`c9>g}R!%XG}0tlEYZPYsbnOl$|l+ z`gy0Ot@C+otrm995v`eC{^GfqZT01YiE41fVDH}!Q=I)KCUZ=`>_Si{dMv9+t>jyv z1FJ0%iU!vA#q~x{f4Sl`fw@@4%wxJwh+FfX>*Sz5yVE0jvjVaF!UW+dDyM^Ku8hZ5 zg`zoMnmGEBp0{OWm+!48rS)w@J!1LyL%QboZjs0E5=~~0p%ToWj`mi`=OD=xzXxzi zyt7zv4`QB&v)m%VF<}8Vwri}WRz0B~kvHSNYpmf|H*KS|R(HrL{Z{IJoL;iDi3jnU 
zT+ODeR%LJeJ@HwEaP!-sQf=jwj5YQO^-s-#=Atmrj&gy!W2{?Vx>+S0n&OC8Y`d+a zAn?wiEgp{Vd&6F8uE-)X?Oje`b_Ul4IzxTi%z?!;=2Lq9dX)oS2tge=g_|X;=UPJb z0*f4xL+(u{jnc3;1<$=1dheG1X_tO0b72lWqVo9+J^HPS-kwM;@0Lk#+OeSftcbbY zuiUhcVbG*mKtCJ&SZCq2&*}ZhX{pU~#A_+H`PT-+zmRj=(e7Kq4u3|%=fC~kK095d ztI^S(FBXvE0OjA1-pY*P+dD&nJ+ztw>i8->0*Ib^z%C;^gr>@5R3LVG@iSJ652&R4 z+JdKdYrI+bjmHCDJu_bcd}C|tP}Sp&F!2;Cy1M}}SFCxvV(ruu+*`yhDxM@UJ*jZ& zT=8lH;p!Sjd+6t9n!V<)qcu6-x;#l4ba*uS-C&^SqzBq5ySm8)g)9Af;R6d8OQdW{ zN$}%VH#QkIoeEHQa!O(ocddMT4##-$&dWOwB<)TZcok$d!3$anZn*TS^#edg=#6%j zx!3MWDY~=#cWGPdfCA0ypgA@2WdCHkKA*$+*6qmn>Ot^ss=mmeV~m~;f5r%CBaXNI z*HZ0+WtRqWAj+~B_XZRrHGVPP2DDFFJn{9MAs0F9u?)5i9KkzpjQactO-=ysS{GE} zuN^GNSZQULcx@v^RAOzieA?rCKkZm<#7Gnx>jH-^$GPJLcB&Pq1#+e$o%VFUvAdin zGoehQA0o^73XuyA`4Y<)o^Ck*UADM+U2nK|{dKQtS7Rz(c8c3iq5d$w@h;?Mr>MxZ zp6ftU>is#dp@#vT_IjD_z2vky!(;1DZ3&qR$b$~;MB+}1{mY->y7)2)=xrIiV82vx zDWC7LZ^@OXa<%M>xOg;s*~4P9?x}a&?wb_l5}PIk<%(qa|GYV+A|G8N&d~9!ahv`* zJSncMuH7dgrfyJ!o$_D=MElS=P`R!@vWg~9==?H5Q+;xo-OFT5U8iK zc58=3HNo!>|1-Pp2F>?G^}V~gFObC1fBB;GIV=2XduWKmL;|)(HO0*$6OgO~%n4Ca zOMZp@)Q1xrx@tfSI~K~n(t*cp!l%EB)qA=c)4=+{>s~+=Z~->(Sjn^))AGXDPN~|6 zRYE%@Y@+blI!B??x8oaNB$#H`OSj))QBla-bmLGaTL9$X$}&m;pZ48MtHC-*0g4R z*7!GRr6Vsmtihj%qK8!vmFCTzIu}+=GwqT`YxT5i&C+xpJQvfn$Y?7a1<{hi=jrwJ ze1ug%dI@lI6XmO0W^w~Bo$}c0Vg_nx+vdxg;O;uklL)%u){UiFw&f9o2UJVB1RNZ_ z9Tg9WdBSg>J~DJH*v`I@)c)*UFMfYo7Yl7DB3;X@r+d;QsB_?Py_Q`B(HYlkD$mO{vvQV}!+UMDm<`TUoUODM7UUC?z zy+fs=Np!7P5J;%nJi@1^ zL{27=@~}E$CW>i8%7LCpmrwR&9(ya&Us_flqia^8W8hJp>7a@De&6{HIr-RpJAZkR zH<-nmpF3@XFTLOchjXUwGpnExE^n|d$9*345c23yLkaWk9t1~GQc zb;^5F2)IvqS-pdbfM+Q8vFrTsJ$D7tg}ppUPPg+Bo~@^I6RpQ{9-E?O&S=P|;>$lH zbf*ry9n@HH8z0>Kk!g`=gw4$MI@z1R%bw|4ZM8IU1@(r)*nS2v!>BE7|oaWp*1i%vneUaE3o!%@8LIf0hPIPZ2 zUfEK28sQ)bZ2J|3ktr3~Wu)n0`=}9#(m~j`wuwhC-Pdv&X7wkEKY#>-`P!XZokr|V z&Wz9G%AiGNgIvMLUXMQJB-+Q?8^fBQ4flYD-!VS4ktb?o_3jrQB|*;_)YzbzE}93s zq327wT+VwwLsoSRX5(`(775K{O^nf?iCF!ZWY`ldvx1Znhv7n}z+ZEUo$D!>Oy|ek zZr5-hUOk3VQKR2wOo;cNv=YjkBXRbHmk55?R%UYvZJ2W#m=;uOu<5gj zdW0GAJxM>f+2^#QV`OBn`EDUT#;&>oguX7-@Nf9ceQb9Kx-6ZzH*u|;=(`e`_H74; zYX;&1diu4uf+Uu$iyD_-RE9<`RFf*SLB;|VI!0+=4c~vtGK+L9uMPJT`%)OU!h?cM zKQP#Alv^x2N(VDWDQFh{0$=DxZjx6Bjtq&3b<6r!O_p5sCQ-WvthjZqX)7P|l!RDEDO6I!h zS(fF3%E2^cOc{*rnyZExrg_=wj%ra6zqyCC%#WK3;Q1}(pZEq-gbUb*D>gN~D&A~B zWf-`Jp7nYM!$LiYC=Jb~5T$>iWBTMYvIfM z*WE!rE{dh|cR!!wnfl}|_efvoH|dqBQKQ1GB9rgL0UP< z5i^+Bv+4#j{Mix!9a*OuCS>|(7)sqA8F1j~-Ykrp z+c4LIux!!9xYxg!3Jbb!$+^e;0B7CwCU4@_gn0(u_>wh2LiU@NNUB87{(&92P3l1NN-kB%)n( zLLiAH8vrqIx-}aR<+ju-`#y;0$IK=y-Q{I&sYdsxItMSvi`2+BrxZK6dtOxDX<88e zK*C7&LBe8h0pZW=r8wLQh z@}$BetmMDnJKt6N3(yK6nyK*#AnRN1E_zQ-J0Kn(W-s|%l07-s?T`z7?{t0p zXjx9^+=(XZEY}$o=m&lYqF%pbe3WGJ+9i2zlk2W)qgKfP%y6j@`@3tnx|#htDOyIC zL#PkEHXTe@%XC9N-!Qh7OKx%W;5hI0*5pj{edM5`cf>?Y5&nalsC;4)%{u%>BmeQS z@74#iw&?IE_aN~4UXh#n+}a~E)^8zZV}jomo{aK2@;2Zxl@$P4I*qAYQplVh+v>o| zxvzGU&wQOqS_C54tJ(_IYoOyF%oSoTq~!}_#6qPWmtQ~bV;+Gj{un>npQ8P^^|>_Q zsR~(m1B#+!*saewo=wVTiUW4ie=3eIoy5!@C?37i#Z~@lTvkktX7Hs;G1p1Zf=4hTjhT%+dPsSXXfMN2D_h92<-7P107W)ifY*J(eTC2cq5yw z?(P)io|!&}{dR~Cf1SV7V8rSvCgQ*J2JM}dVGDP30CN-_c_rolN+wucOXaaKXlL%>w(hrN?P?V-R4sk+NvE;+jZi-nmH}$d3V(YRa;z0sqIh%)$&24PV5TOh{1QtEikoFXIC#GjLl}g3;j}KqjS7@ zHp!D%L2X196Hq=W7?8OjjFEC-x}n)h0P36HDHi*s=H!$mzG2p5k{;snrD_@RzQ%FD z8B<|h$Z25YH|ITFv83*~Wwf@LHS)$gJ4CH*sU*_?bfSJ`2-nY+hpT(|sD-G}*7cTn zrtNS?y z-^)Fo>Z77Ee+RyPpLHKBCj7%)Ra41FB8S)quM_qIW`GEO@rhDQk2Wtpl0&8(3`(U8 zyqtBUm{puicRW+mZuwZQdOw3#rZ@lxpH>725A9uDkHsNui z73`G3`s_P#o=%64iI7{de4XzROCOo5!b5G!%>-KeoqnM1dqhuavmsWWc<#;w7VXpN z*(TOv#eBy_nd;JU@e^mNe|nW>#`TGP^>tBbxfvUHotzP3>M%8)Bd|L0o;tINLqJTo 
zEkb!*DuQ!#f=1kFN}y@}!@$YpXaY=jf4!liR4Wlx75-(8mWQLkeMGNJxys7nn6p{a zYte~$cQE_|d0WAKmFnWEXX-q~zzomj;Jjk}a&^Y@gvQ`!^NY;A-zSnZ6=-c!xYClu zK<*A-^^GWsU$X^&m5zFL*LkruVa1kod`}GVta70sQn8;nZ@&Bk0wmTm=>?_cOh&~q zyJ?Z){(<8EjzkeJ^QU=^_iX@ci(gz@(+hi0fnkrk)lP5MS!ZLaQ*wAK76SNTPr^#o z26=DyiwhPvuJ%bhqfP&6o8$GUUhDz%lSh)%WA!Lpy=qG8(qUx)8CqAp;cjiia z4~G#Mn;B5)V)s^?ViYhcf#W(da+eP{J@Sa(RYtR+B2U((_ay3eitc0Pihx*?wxUF> znsHNExu&&ojwnmJhq)GOk}Wj0`T9*K;@27Y=-23EpUL*GAdtpb5<=I-XVL8!b4!)U z!1=4)l!k|!A1Y6)OC9(VTzUH`mNSR~nm+3-R^HZI8`f8bB*wjw;&*|!QwLOzOw4sx zMfBgSE|`O4f5b-oNET}Iu;5=e*PV5;Lj-*_h1m{veOP-wJ2x@txr8{jqnHf8E~J`J zt^W!-hda6$)qgHe(jC)tzkJT zw;8KzN_Ul90H)jJ&LpK?)E;flwI4wSrEqYvdW{0ucH+m!+rk!UXJg^I)SiqQIaS%gQ z`XUWmd$hhbFXyAb%-eVGHre{7LPRoza%V1HhMbg z)(A{c@j1k34ue`{6B;;2^S~|d4^xZ8<($Bi(<7w%^CQYXua5}=kA_{RMwk*#$fqu{ z*jwr>mXqP%Jh+z zu-<^5&5EeSocI?@pNTl_k3%o~<16Nz0~;#3{b5~mm!GVHt~{u$G^+|S5BrD?H&*a* zhHL*@pOa+{Bp?E^(W5a#U0y|h;4?*~3~$PtR_y&f>&nN#B<;g_{Mxl1o~vTgX>4CC zLn%T0dvjmnv$YWVdSQs$W;N(s`oW#pSxJ$7=IXK5u}3qF5|is%VlD41YAiT{Xo#14 zJEk(G(Ob18Q-s&8Q@l;bT-g3#Wp<)aO1Y!uIP8J1P?SHVF{|04PX_^*YaehC5n8WJt$$q~?BLT}39__ZAlGq`ConPV*N9HXh5;C|2AwG<+iP zXvw+9xS&C2NYY8bz;}Zzf!)cYd<(8C0=dei6$FQdu(ep-!pplxRmgI4{ZPb zuv6UoXxBn>`KF4@tL-S#6wfF(v*gSCpZ1xk>nQ&|hw$e^f>I`h=Q$oA^BjgR;~8mX zv2hsfhzWQYhj-)Yqu2O9Mh*1c{DQBOOP~3-2pc&kD0)1*l6p>0=H+>j2X|E+ac~FZ z|I89DJXhZP>c>?ag`DN(U7J?0Q*n0{F<_V(bytF1UUJqzq{Qozk~8hu@}=pnKKi1j z(9$d#`kXu#cR&`oDTVqjR+IPzF&{a`yYFghti{JN~}-(TTXcJ?VC70<8g8#wR|3hs03tsKbVn- zaRJH7Q#Db6$EKr2hG%89s@)@(?(l$HIkPw%;`N8sS!}U({*$y8xUxkhjGbT}*8}Ig zN;MvbM+P#vOOaGWEuC0pk>;;Gk5C2vtcwEMnfmQaO182#d;#XMV7bYU4Kxz9;G0u+ zaF9}qoeskCJ1p+HFll60%W?sW)dS^a2F;dt?ftbG8a_sy1BTEK?l!neNM!A)yvmQE z(oPS(O3(CxI?l(sE;5bGE1w#x8#jDMt6#cQe2@R4U-mV?#Nm@WTBgPMo>0Tr0Xaa* z{tj%7H*B`5>Z+tU%HXkFR-{cj=U3VykSJr+w6?cw&*wSAmvefpM(Z52rMefbwkx8H zGy`p;TWtc?6t*aNVwHzFxjF2;(r+=zbq#MI#)y{Xrvv=A!-2DUtNgCVY29vd<7On< zGKJ}{R#vwexbfXEGvMt)@hJ1ys>CSi*xG#%t@TgI@?nm$d$5}Hvb&C)9Cd?(bD|*e z7{)1?a_iEJ$%5&TgRnWuwq#PUO~2X_MsQ2iq5RzvY~QMZqKV_s2dPeLm;cOZw;Y{Y z6h8cuwuib0^~HGLB530qCdXyUr$oBV{4?5#!!>Cj_`Yke!klbZLgS#Ig1ReEA5>W?l4Jh}$P9)@aF zo_!YRm4{XK>a8VG^Cup7*?%l_Mlk${jm(+bY=9#(>iQ3NKgY^C4kWPtvu}L-Dua;@Ge9f@Mp;r7Jh)n2sZHllo^Bt*3GlgCzS7)h~ z)0h&M0OtKB2j6-4R^4fbxO|YY^y232ZSkBpwv9?vu3eNpp}nqzITY6Bvdr}DtP2y! 
zQ~zT1K;O>Yg)|mCXVyR|^v}S;FzAxVv}Zr~MEr+^*)-PX(;eTJ=7QctT!zbr(w82_ifMB84HfP?_yDxl2z<)b zdE0FZ6s$Hq6)1>WS$Se?Yq}?C439IV+@g9-FOzclKDkroQ>o)$SVt0uc)$U>aq6Ml z$hza`)>j=SiSWhict9top#cf|NGdq$Xs*9q755iOtJf&Uni%a-?~|O6Mn4sSX1Ybv zB69eTc51*p$$z2kyG?2TPn7=uO?v(RvuynT_s{;}*1s)LK|IYzM1uhUz^Ciqbk6_d zo&f*v-}nDC`-A`7>i_g7(eF_+QQ&+e`kyuZ-~2)Thg*95Z@s23g#VS#10;yPEtQ`= ziDo7M*MfgNO#lE(q6yXiNavLQt%vzP1Nr~y7xBl(|3#a)P27#~Tdte)|5Zyg@B++t z5;+p@Qv_O#iKU{CW<*!5|3s*N@f-gW?U?>ML8bj6v5eQr@3lHb^o#=R5p6C378JyG z>y?22Hd+6fvVZ-#{+F7v|6I&}g2;bX3L^&8L>k9uC zJcyfem-AHD-c>dDuCHczMPf*u+KNykoAgU)fK$0wfr?~l1Gke;UaIRs;Ig;bjQ2=K z^G|@wQLH(rc&Zp$aR!35jKzYzHDOtsxuWJ%_S`OouJYSn-}jpbSwIaoHB>3vj{wki zVgiRHE=5$;0A*1slZFtf>OJI~mrGm2So=!y)b@yqh)wW6((cNnro0-0oFUiB%6kt2 zmBF|CES4wr`5_wIW`T9o&~+P*tzC(yGN#t>@SeVIwO~hH>hNLQdC+sRn!stnj>SC< zN}@j@kf=%%tLXPYy)snfTI}&>=PS*jhAVBX4$yv75}TBX*uQ!$2@Ez2=-s$f+0w8t z;yN1v3cFh06mv9LWu8JgvU;DkO!))a8CI9#Ip#M0^d>KsouvP~4Xh?7XM6s4A-!No z-r~%7kzS;twPs)SI=>~dT4D+3ZejOyC{@P5I|9w{1}2 zlfd*thLQJ}l+7>@aVWD)XMjrfnH&;cP|=da#EI%>>UOxP%hMwEnHZ(zT&$C&MCDT^ zQ_xfHLc~2yAFBCZyGhrxG81~JeES9U-NnVlv5d)brR@{?HZdwHeQ|*@Pi<^&6%JAg>ZFLM6pd~xn@RO+= zaLcBb`|cqB6cY;T|2U!y(UexU6j!gVx4?MnHSd5J{bO$Ju+>@W@wC;6Q}!aR3X-Cx zmL5K=tw$^jb@|?ebK8;v+$8N5;K>Ev_}{S|&uCRq@zVmHx@?AXvlQeGP|=~s+%6EG z(@fZVA9^no@k$)aZD>bhACKh;R3(;E(pE;wlf1dQ&J=2onU|~)z^()SC2}^6Kqkp9`#wrvx0H2pk3SO>oBxa)?{=`S0xV)17RrCReWA|ybP%pP zl-cz|R(mIAtS&oqJ2ySlRrtTATh@&1?*=LLtX1qLpBfH)anK- zi#~ea{caQ%p&jZAH-_^CWF$%(ZDo48#%G%N==shY)86^7S#ji8mfR0wVF~J~3bT8e41fN*0-e^qy zZe_m`=3WmMn*@%P|1urn_eu;NPwy?aASz(*1u}=)L~x=|4HEA^Y6Zr9rVb%@y_s*5 zskF{^yU+*lxk-S&kL{u#39 zuknNcNxG|3dNgpJOT)o<0jVILAdmIw?v;vRUiW&VA5=#bgSuHo8J}p)zsnlhom-IduRK{6g=_SrTkxQ)0{B zqyMpnum7DdjQ?y`|Np#Q#b)j65g~URe7-OE4#|KwbG^q|v0TWn=wPPpU?xTYp#3ki zS=k!=JrxfqBS$|K@Z`X7SHQW)8wjzc2Y)*i@|w9TI>QbEy;~;%SiZxOd{3A(aoRt* z77o3-|G8?0Vzn7Pp}LDpOj^_Xl-cw&2i<#TN#|Sht8vOm==zc&6tO<cg1}v0pE@4HIiwD zP8MFj$wg1AXq#=V!%!88;T0{XR?C!zSqi3RVz+ssrH`(jV~M7K@*C#m4*(zgs*eGG zxK+4C@M88rhVaW--<{*%7qx#Vp?Ot=+U=h)It7WWMb;A0Y*xkwLgliujlV68e1r-Q zY{XPwY3Y3Pf1x?r7o8R&)5P;9khSs+(}cFrQaaJoayNuG75kDCYp(uyS!J&BQqWei zFb>V6eF%7J?&Wg%++VR zU{W>6YVw5_%U=HCtq)FwbwD83lR+kXSbbI!s+gJs9;Pqmae4MGNEC0b2%Qu!)9!NS zG~kIdqjCwd`{pnx;t-p!k*gsFGw-)8U-fGIH4P^;35XNzHdy_PqdczaU$|4N$$q1K zB&DHaEb;q7s6avspRpd}+5h>(^<=HrGZ1rc7tLLkn8Hd%;aVaO70GvDc!RVX8Xm}* zBBj6dtd$*gj4vCKEE5_xj%c#$*GcM7p!QVd=A&fD#Nf6}Th{#=*IU%`W9vtNQ07e- z#-#bLkJ_Ez86uv}w zpmEOn+qmGKpfOJCcHJvM@w;(1DYHr?zouZ^kK+g;AG$_66jeehMcL2mEG zTO3~NJaTsT)G@8bWawcU9$tUvt(Y}*Fl2$61yyX@p&Y8MQ?y7xFH;`>@MiC}WAO}7 zfa#goHN4m`9~9Ob_PHAfrQ`jIt&!nLG+puvGA3i{0_F zPN@Mo6cUTg#13Ts!6QcXW<8CQ!$zG?a`)Y?HVoWnYTDN_epO{r)mXu=3IyV2FD*U7 zseYY|XqfBK$~-s=BVx_HJa_6nN4)9wHr0<@&E489x_efnJLqlq9PrcSLV%SGrn6Fy zN9!%WZC+=7))PAONx18Q`UaPX@4*fZ7v?Narxo>-uOq1Ia7j_Haq6n7vY(kKUl{IX z&ling)T3b5)dR@aa=m?hkUH`LF+c6 z2qp?q6_Ipl=TG|g^tPf5Tej2oCFM~RS?ys}TApXFk|%cKuu=w^Cid=I({apo-uMOD zj3x`-abRe;n(G_1N9y$lrRohgy0(H3?0p;QDQhPghl+6v+EsY!m6|oo)`#u@&s1cp z2bPB){WqugxtylEZvnp$mZO!GPDYTLYw7#G^3Vh1+>mcux3+l!bCL3n_L9Yjf=8bh zcazqJ@s+E;*hSa4OxEc6ro*C6SGz+1nPuQY?4SM&5fKm|C()sLZolVS&Zo4E6g_oq zJ-d#PAa$iJoy(qmy#^TQZ-*ya|7031iEuMxlvBdU1Z2*p_`48HF%k*tAi?)B-nM6v zn}?=ruOcpfkM_CBx37=>YU1ik+1ZR3nQ^!ckbe+d|Er;aLV58$IU$n1Vyk8(oxb0A zVe~L$&Q=KD(-d6}+N+N)2x^EdOxl(Vj&snGEw0+Jx@m*2%jNGc@s88oorda`Iz!Pk zd==yMvSWj!z`3;PJTQGy`;Gp3%BH9~tY8?)$A?rk8R0MF*s5kQ=GopxPyqfsRCBek z{>r~LJD;{L4hpOiUV6wKeC!yj=kFpKmD0LvyvYD&2b!swsHOgBGF+PLVf``j8TZF& zyV-BFMKgL*szE?f0hr}p!B{^oz;~L-fy!j0lDExT^KCkR*ZZ(ISNZW^EZ4Gc%iNm_ zp1z&RVKv>YMSZiB4L5;7g{@{sE$NHYDfPK?E~jR>A|T*>tUOaN#ZCd+)sIl1tDdjm 
zj?W{Bsjm0p3<4&CjUPG*EjHs6Wce@C#VQ@=qO=pllIu0l&zzHlnXvH3Jjnr|%Vj-y z5}Qkazk8e5OaKs+{8Y;(5hYj2tK~AYSRVxLX@^bAQOw|AAujUYNj{D$yHwt7dBE)+ zQN5tQFCLH#u3P@&RR>O9eu8+&Jp0z(m%8%@W3_YG${K3!=ytIU$4hx7Zv<%uB=xia z1jVcuF7g56rZ0MjO7|by4|y%GNW_R8q7M?2V?_kaKGF?IX@M$^DO?vRAaW>C*LS9g z685mdsJMn8u*~}>o2A|OGOe+|-YLKO9^XE|3b__?E$g!W$Kd{uExN-=y}8EQ!^hn8 zz=?v)#Z;BaZP-6E^>Ea@=FS@ii)-1lP8-IH2$xairZDFO@u^JP* zGAfN>g+N1qV7U10i?V|DKPTKji`?@_M=v$o+Y4mmvmZY$IUo%yi(U*hfpI?1Ruyzr zFeW+=y%=sUAa8|B^wkhpI~niC?n&*}ZqQDpc*RBcy%}%RL_?(fTg@3HT$tTN4JH(i zA<#mT)Pc-*Ex={hZ*3GsyXEdn4X|ZHvX3TJ(iV_Iju#mvo01BGp+0uxm&g7}Gb1sd^7wli|5dFgs(b#gHmM`ArI-;fVqPP#Wru};qamt)T=ty7GJ>P5ej;Q5 zfcfvDQVdeG9U^OrGZiNO`iR)4hMcYpxHn5IjQTN~c!I{JTLn(mGE|5vB3^NgWOeb+ z!V@F=W?MzU`Q-FwqAv4ET*Qbvw0Wdy2ucC~EdCBJ zWf&J!`7&pkMf*9S8G8YQUN)@+wN@}NU7Y<50LX?rS&&dc6Eo^RJP=dK1DCGuj{GkcxD5+*`(XppQwrJHaS^hMyit&so$0s)pN8DTa@-^m z^a*3Ekk&~;&*+ZAWv+SxJ|pK3VU=38-5GeL9il^M;5O<4V|?+wf~tgTR+%S(79Hwz zsTZX+_(cMSY7S52Dk%~AQI1!P9Y=%6Ym*7K`HssT3j&@s=;se|mf_Hf)=yJ=62}t? z$8iu%=!prwGY2nM`FPV6@}>h#R>XQ0T>1Dm6$TYz1Z}@g9fAoQJ%QZhY@aK~HGz>iLJRo);cWbuM=A&*J=W?zDC z=K3OH`}{{&!1!^3BmuXPbMZ}qKxfW3j3lsz>>-mmZU$Kixe5^V+Z|Um*x(KOlfdj% zEY(_OcKrli{BG=J8<^nmE~e;BCG1Pg4W&oX;>+FCV@0ww7FS= z_HQ?wJ;4-t-UlHZUbG+L$476@35p^6*Mvp&p34OVY(}5VDw1#rA!ruuvPSH*W84Tk z)^3I(gebDD^Q8_9I^^h48oclmi`oUBIjzyf_WP z%!KUN@g@Ug&l|ugEr4Ddh z;fiCFfZwR>F#hah$he)03;5&aKWjQXPRas9QimWztFPQ3njvS%l#@XG9|9@`vZjsy zQ9wAOJ6XV^kk4*Lkc510V*DiBf#}0zT`M39`{KeRwkwG4R>`$5hp4De6#fXr@A?+uN8_E7R?*neXna7JcW66)SWF z-aI|$II@n5mMy1sR{V*=P4ZDj$PDX+66&9|!yD_H;}CmUtcXIX!iDv?FM;poPtLVO z$frUHDMpIhK>o=Yqub;jY3StBtxIzqR?WuXN@v*YIcW~?XT`ZVVvf^Y@yIP@iSDM&hqJ7ia} z@0_Cv$mKNo26xlDI0?Yrj}m^W9RD0u`xz&_FoKn_*RTIdXzQ2Os$MR4u6H~i#Isfr z;`CiyrVMQP`VHO<);sstbEv@Hr%7w*kokS)pj!SLc>kAF4eMqn;MT|Q{1G0)9fC~a zMS(0=awlk%_h^=8)(6xpX&F(hf(+YT?z&GSZ<)YfS^RrnxbY)wx_#uG;ST; zGKZ>9QwL*PUVSXlyVlMSUbwO$v2?Nxx|rhn=r8D6sBDhaouT@ia&s$W`LpExwpPH8 zosM&*rsyMX1b~s^{I>Q$$4C{;O;g1H1AznZdZt&4`LnIykJ>LS{S?mr56s^jjlcBs zr}|nZtNU3#{7? zdXw7Mz^VpQ(}6qGFM^nEi7V!BB?n5heitni63kZm2U9~2FtBfzmbz2JW_jCjJ}qNw zs95Aj-KhG}$9WbxCY0#WQTUy)qdgSXSm|_Skft->=wJmXC8ToKKl^~%J4alh^jK}zzukEXt7H{Hadbc*&?`Ylr2V|Q49Qz?+l%3vQc{WJj-e|# zL%=SQ_X!nE>X7>uSBM!o^-Q6su}x31XYc5;KiS2wOj;o6Z|2}MnnL%`g=3uKdL&*pg3S|4KEJGt+=#6ad#~g zEAERGXK{CTDaB=RYjL-w_~Py?Rw!=8rMQ0d%G|lnpZt0=IVZ`9O%msa&B<$o!_)r7 zHT^U_F9NFAPW7|>dFJ4_pBGivM}QZbvjt$L`fVf&<87N02Bht6RoD^NIsdN171xY3kc9MfAN`67J2*3dVo( zLPu1y@V^ESlsMp=}V*Qm{`bI)e7kyEbvLn zC#PPU>_@a5PC4hBF7vx<*?Y=H-;}H!02*c}ZN|gcmQPJ?nR_1) zdrUpMe?iPzT6jOFDBcPQ*-}l0T%2Xj!nn z^9c0;Fp5`%KQGVBl7RYcoRf=Xa_(*rGc*n6XfGj)c<;vDRPV*$!hk*bu{dOwx}$OP zI-x#6J<#m&zGS%(ioSS_a;7|o~orj;qj{Ta~i8KQy4lP0aBj9CX)?R_1F9?dc zz@nY>gpXV`N*GX?TDo_~nZ#YcO-gIs0>Ime3i#m(i174icL0gIF9yVd%uZ`Q3aKOP zj33BH5*KPYj6P}di7%6P8wOF^yDr;aT@=lA)Qw)YuLW}5`3Q4(nW&Kp!~KrhdX7R8 zdRWV1(_A4Fvk5!_V4<)VN4D)-f)Kpn)qAN9VMi1F0QK@8glc>YXI`>4y26X0FSy^q;z?fy&ol*WQG%$gpz>dEq-4`?qB$3Nalk5HuzCeF}a z>irh0>V@&qdwQuyLl)I>$@TQLkq&i3Y95@AY8kGn?FKZDSfwY+y!Df4<@U;G_w832 zY4U*0PrmPp|yDxKVA>ULI!t)H}R@+~5yexBwfFoGF{mSis9-H6hFN{Fy!7>hS)8

ki}I>Ty9E-fH(IyYvE}rjh_-RyL^6T%Z?Q1-ZHxb=2isXa4~&HZAk(r{j!pvn|qCU1M60V zLC2N%)2P{rIW{$seFE!7{oDZ6Yc8*Tzk~D9zflMrAJX0{nI6knKJVmw6}xK)M}5eF zFajZj772??UG1OAoJ*&+D}&Mv9nMMHRUiARw+*1bEF_cZ`#oKh$CiDcs-X1Q9sz{( z`+wC|f6srf4p#6=Nx!;KW`{<6%E$xDa*HKmd3*G^aG82`A&OKy9_iWg?-qn(VdJD+ z1<5S3v1~S6hz~E_B^aZ?_er&!#l>X{KOLyg`Mtqjul$7pC&g!FsE17{WzFM}Whl<3 zbjwhio)0W5&CLo=RR|v=@?X8RZo>yWaBr0j!v)P^XjQF8dK3vg_+6}7|Iu)9Uo%yP z5Q7u)C|mOHsqJ0b_EV%Uio9f|Tb@W9ZGd9QrMaY59^IxRzn z#k3B@T0&>h5o^86X}XcA0mp@x&T+u08_h!7zYt7?p3#npdy~w9wR|Y+;q&~HcOMpRC zmBAgULzen?rXM|lJH2;5^0$<}g_7n8iT|`#sf6?$+4@RD?H@!?@*njCCVQP_k!am9Df7XJJz;r`0V=SSjstL4mh3c<590iVk4q&ZDe~&!$-`TYuZ7V& z<8PEn33%ibvJawfCn<{nIDEl>Cr1taB&_p}dpTpL=?4`n?C~CJRMia4J)qmXhQnaa z(9G6WGCPk{A<9V8Ln1^GF>KrkF$0V41JiedVOqN8qK8gnIJ&0`mU91;QHtq_1+lf% zw{ID25A7$}sHqu`yQ*%6&Jv`H}e@QjL&$qLh`{nWM5!pU-ojy-3}e(5p^ zKg6mH>OBW%c^QTEk)LG#{-GfsyL9<0Nrvu-!Tsl>v-@m9QGJO2*B*He(*(P{F>jVq z$i0>lb(t-Y3s}C>paY{F(J-wxN4~nn-LBIBMY10_MvBLXy=yb}IpG!zns3OrPcdfS zU~u@|r=5u^2a(W?tC*qecsze=P;)ekA=YJ6t$EB@T;W4n+q@zxX1L)&^tsiea#4); zs`xj49i5w6%NgsW=iHbiVbPXRNeMioIt=z;G=4x}TheGKo9PO(NKlARbHDkQ-t7*SV6xDAYh;1X~?3uNTA;tHN zs*t3V_;Mg1jhEtxE~qN8b;PPx4ohpW+UP9WB4P)otxhfoP3|?q*t%oxEFPpKf@z7; zX}+9cSIRKYZJ+}fLrXY+;_nDDlk9X&=A{E+t>`UN^|`s1pPZP+Hc?&*17ke&^mmB` zRIIqwDY{lstA`V;&=|h&wJEKU_~!}gZbQHIMsB@(x#TmfTaV!OU67hxn?U@jT=YlN zV|wS_a)^nEy>+#TX2;1wyJnH8Fsxm=#3SQ>ts}0$$of7p66`jy)i8%YoLxQ(q&V*< zx2B>ja6N9JQ=&5GlZywa?p{Pv2k8S|eaiu-XQv9Jh$@;Ll!ipvpYpO;`YuK>E^-F- zZwn7#R4hu)_)Kb?1`%uiAU}tCmi|jGxcFgo#n<6B#1fI$oEOX?PHx4|Mr5$9ag^T3p#4An2NZJwf%jY)#DGyx9YY z!AGAmK-OJ3_foGqSN!q`kts%=;r8-TbH9%r>?c3;ckwZaa6Ef3M84c;9PyHdc<1Ds zUB%B-8qIC6VhJT{AwgI`pY<-@c1HMNuw42S@)t@z8g(50_4l+gy%$sM5=17DLc3eg zCh5<-lcTaTi89Arp1bPTzL@L0_3~_McvC~`IhEz67Rc<9kU|Gj;N`1T%E(*4xE=d& z#QammW})<E>7J528;%`lS) zCn`-K`AG-b+`&R$OOe|OqrU52G4Y^Hr6<~Xm@4m0XeLbwJv@kX9r9o5`6R zcDp*FT?^O}6VRU!9gh4es3GKz8jK0ZS=W-HQkVw8ZRSO#E+`1T{>SKhK4Gr%uj0?i z5o@fg&F@Grb&ZYirypFB_+wz0v%Qr^(hj8bfUL}~0hnDss1$_Vme0nSK!#6$u<^RK zZ@pqD7&J=kEAQ{}B3*nT(nqui`dkUK2X7b_aJxNRDs)c9k|lZU9a5I5b!5D0Q`KR5Sb2$OM#O0f#MxIQC=7q^$@-U z=Xn7p3?EA?-{%JTvjZ?Ks}(#?O;t+71xjqy8$CepihSXmq+u?fta%P7>Y8_xJhGtN zC>He1u7jBoL|YOq-;}?Y@2lv*jy~>4BO<>YXsR)4T1>Joy{$v~(>nQhY8}Ac1PMm_ zqn-uD|7#mnpj)$O!o&&CC zD$U2sF8~>mlGYdF8XBk93pa8iaYUt?1_s3>Xa z6ucMlB?#K%qoj8nD(KXpEbw{M6y$!7=HJ9UsJk=8jXNq;7b=^eN#m9%u6k6r`#6Y} zhaI#^1W1)?PGcUWZ+ufPOE z(&B;|pVDWdMI0W4;yzz(bcc(mP5WaSA<^aouM}hrJVQ(|5El{++9l_%N9NMoqhC<+ zDubxm(VN%WNtVLuSMhfMlYGDbM)!3uowq}y4@-)HEkg(B?6&ml&BgBjH7mf~_|p41 zhR1!dwG&ugdffQk{qwRL=0Humyy7J2dW70Lh{U}F(1Z5J?>Cg6#CJ?%Ds&h)3( z>n$(Z{tOgozzRhItG+G>00>~C2wZ0ja(!UGmEYlnlpmRuSUDmLjotU8a>T}oWG?y3 zSOA>U3&rqKj?vQmH+v>-{^e_SRYu_rAK933n_3x7tlCT31M(OdsY=Q-z+UU56Xfw@cSR+8iCZPP2Ld zYu8brcEqhd`3u(cRagNhEVra*xvXB^@*2KLr7}rnC@Wr1=%)C6|6FO&QU(|TeFvPk z^#0)`z?M2UZ$+4assJ8nvMLNDs=5PL*C#VO>)THdOv%uiRE*E)Rlj!rHHqhA$ha=g zl%j@e03Ad9fXg=5KW9L*zo0MuTQsVQEfX-4)u#~^Hd-wiIxf)LSJ97|d)=LW+d~_zQha;& z>}V7-eDjvOvZs$WYtg_CVGT;0Oh2?qzy{NJGB9gp!cS(w%X2u-ep*g5yV%z0L>%n& z=lrl{HdgxvKN-VSKd}yVJn}{lD;dMkx<6~vNlrgs@!5jY030QUd`d9o1`Xqv$arjQ zZp6HVHZPc_AF&o!a4~*H3iSMI?9Qux#f?y0;*)8Gp`Yz<;L935O!*pIGSgoX*-1Ac z{O}JI!B+|AE8Zjj@_l7kBW}N3@gKP^uhyBqa*^_{u&*SSki)bx{yT@qj}3}Wn4FdZ zx39O+=-i}r+{{hfKtg6Npw|Sz#lgkF%E86T!L7-~Da6Am#KFbP!7IeUk@Sfp;QtBi q9nGyRy#G(YrOCx5#KkMb&CATe_bT`t5Jmqg0LV!zOI3d|4*3rrI-SJ; diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py index 1ff75a24cb8314..7bf8ec99fb5922 100644 --- a/tests/pipelines/test_pipelines_document_question_answering.py +++ 
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
 import unittest
 
 from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
@@ -47,6 +46,13 @@ def load_image(_):
         return None
 
 
+# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
+# so we can expect it to be available.
+INVOICE_URL = (
+    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
+)
+
+
 @is_pipeline_test
 @require_torch
 @require_vision
@@ -60,27 +66,27 @@ def get_test_pipeline(self, model, tokenizer, feature_extractor):
             "document-question-answering", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor
         )
 
-        img_path = "./tests/fixtures/tests_samples/DocVQA/yrvw0217_50.png"
-        words_path = "./tests/fixtures/tests_samples/DocVQA/yrvw0217_50.json"
+        image = INVOICE_URL
+        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
         question = "What is the placebo?"
         examples = [
             {
-                "image": Image.open(img_path),
+                "image": load_image(image),
                 "question": question,
             },
             {
-                "image": img_path,
+                "image": image,
                 "question": question,
             },
             {
-                "image": img_path,
+                "image": image,
                 "question": question,
-                "word_boxes": json.load(open(words_path, "r")),
+                "word_boxes": word_boxes,
             },
             {
                 "image": None,
                 "question": question,
-                "word_boxes": json.load(open(words_path, "r")),
+                "word_boxes": word_boxes,
             },
         ]
         return dqa_pipeline, examples
@@ -103,7 +109,7 @@ def run_pipeline_test(self, dqa_pipeline, examples):
     @require_pytesseract
     def test_small_model_pt(self):
         dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
-        image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
+        image = INVOICE_URL
         question = "How many cats are there?"
 
         expected_output = [
@@ -157,7 +163,7 @@ def test_large_model_pt(self):
             model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
             revision="9977165",
         )
-        image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
+        image = INVOICE_URL
         question = "What is the invoice number?"
 
         outputs = dqa_pipeline(image=image, question=question, top_k=2)
@@ -206,7 +212,7 @@ def test_large_model_pt_layoutlm(self):
             tokenizer=tokenizer,
             revision="3dc6de3",
         )
-        image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
+        image = INVOICE_URL
         question = "What is the invoice number?"
 
         outputs = dqa_pipeline(image=image, question=question, top_k=2)
@@ -263,7 +269,7 @@ def test_large_model_pt_donut(self):
             feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
         )
 
-        image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
+        image = INVOICE_URL
         question = "What is the invoice number?"
         outputs = dqa_pipeline(image=image, question=question, top_k=2)
         self.assertEqual(nested_simplify(outputs, decimals=4), {"answer": "us-001"})

From 2a2bf09292e70d4306a185cbb114347ac4dea373 Mon Sep 17 00:00:00 2001
From: Ankur Goyal
Date: Tue, 6 Sep 2022 07:42:38 -0700
Subject: [PATCH 34/34] Address comments

---
 src/transformers/pipelines/__init__.py            |  4 +--
 .../pipelines/document_question_answering.py      | 32 ++++++++-----------
 2 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py
index ca8102d57695fa..e3f9e603b5111d 100755
--- a/src/transformers/pipelines/__init__.py
+++ b/src/transformers/pipelines/__init__.py
@@ -222,9 +222,7 @@
         "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (),
         "tf": (),
         "default": {
-            "model": {
-                "pt": ("impira/layoutlm-document-qa", "3a93017")
-            },  # TODO Update with custom pipeline removed, just before we land
+            "model": {"pt": ("impira/layoutlm-document-qa", "3a93017")},
         },
         "type": "multimodal",
     },
diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py
index 3329ce2dc48103..b0fe18cb9dd6c2 100644
--- a/src/transformers/pipelines/document_question_answering.py
+++ b/src/transformers/pipelines/document_question_answering.py
@@ -120,6 +120,8 @@ def __init__(self, *args, **kwargs):
 
         if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig":
             self.model_type = ModelType.VisionEncoderDecoder
+            if self.model.config.encoder.model_type != "donut-swin":
+                raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut")
         elif self.model.config.__class__.__name__ == "LayoutLMConfig":
             self.model_type = ModelType.LayoutLM
         else:
@@ -240,12 +242,7 @@ def __call__(
             inputs = image
         return super().__call__(inputs, **kwargs)
 
-    def preprocess(
-        self,
-        input,
-        lang=None,
-        tesseract_config="",
-    ):
+    def preprocess(self, input, lang=None, tesseract_config=""):
         image = None
         image_features = {}
         if input.get("image", None) is not None:
@@ -342,14 +339,14 @@ def preprocess(
             if "boxes" not in tokenizer_kwargs:
                 bbox = []
                 for batch_index in range(num_spans):
-                    for i, s, w in zip(
+                    for input_id, sequence_id, word_id in zip(
                         encoding.input_ids[batch_index],
                         encoding.sequence_ids(batch_index),
                         encoding.word_ids(batch_index),
                     ):
-                        if s == 1:
-                            bbox.append(boxes[w])
-                        elif i == self.tokenizer.sep_token_id:
+                        if sequence_id == 1:
+                            bbox.append(boxes[word_id])
+                        elif input_id == self.tokenizer.sep_token_id:
                             bbox.append([1000] * 4)
                         else:
                             bbox.append([0] * 4)
@@ -361,12 +358,7 @@ def preprocess(
 
             word_ids = [encoding.word_ids(i) for i in range(num_spans)]
 
-            return {
-                **encoding,
-                "p_mask": p_mask,
-                "word_ids": word_ids,
-                "words": words,
-            }
+            return {**encoding, "p_mask": p_mask, "word_ids": word_ids, "words": words}
 
     def _forward(self, model_inputs):
         p_mask = model_inputs.pop("p_mask", None)
@@ -396,8 +388,10 @@ def postprocess(self, model_outputs, top_k=1, **kwargs):
         return answers
 
     def postprocess_donut(self, model_outputs, **kwargs):
-        # postprocess
         sequence = self.tokenizer.batch_decode(model_outputs.sequences)[0]
+
+        # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer
+        # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context).
         sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "")
         sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
         ret = {
@@ -431,8 +425,8 @@ def postprocess_extractive_qa(
             )
 
             word_ids = model_outputs["word_ids"][0]
-            for s, e, score in zip(starts, ends, scores):
-                word_start, word_end = word_ids[s], word_ids[e]
+            for start, end, score in zip(starts, ends, scores):
+                word_start, word_end = word_ids[start], word_ids[end]
             if word_start is not None and word_end is not None:
                 answers.append(
                     {