From fc8833600cac6e23028fab0a7ccdfc1d2714ab84 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Wed, 21 Mar 2018 19:51:24 +0800
Subject: [PATCH 01/40] prepare for coco_reader

---
 fluid/object_detection/coco_reader.py | 181 ++++++++++++++++++++++++++
 fluid/object_detection/coco_train.py  | 112 ++++++++++++++++
 fluid/object_detection/train.py       |   2 +-
 3 files changed, 294 insertions(+), 1 deletion(-)
 create mode 100644 fluid/object_detection/coco_reader.py
 create mode 100644 fluid/object_detection/coco_train.py

diff --git a/fluid/object_detection/coco_reader.py b/fluid/object_detection/coco_reader.py
new file mode 100644
index 0000000000..4e680c2999
--- /dev/null
+++ b/fluid/object_detection/coco_reader.py
@@ -0,0 +1,181 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import image_util
+from paddle.utils.image_util import *
+import random
+from PIL import Image
+import numpy as np
+import xml.etree.ElementTree
+import os
+
+
+class Settings(object):
+    def __init__(self, data_dir, label_file, resize_h, resize_w, mean_value):
+        self._data_dir = data_dir
+        self._label_list = []
+        label_fpath = os.path.join(data_dir, label_file)
+        for line in open(label_fpath):
+            self._label_list.append(line.strip())
+
+        self._resize_height = resize_h
+        self._resize_width = resize_w
+        self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(
+            'float32')
+
+    @property
+    def data_dir(self):
+        return self._data_dir
+
+    @property
+    def label_list(self):
+        return self._label_list
+
+    @property
+    def resize_h(self):
+        return self._resize_height
+
+    @property
+    def resize_w(self):
+        return self._resize_width
+
+    @property
+    def img_mean(self):
+        return self._img_mean
+
+
+def _reader_creator(settings, file_list, mode, shuffle):
+    def reader():
+        with open(file_list) as flist:
+            lines = [line.strip() for line in flist]
+            if shuffle:
+                random.shuffle(lines)
+            for line in lines:
+                if mode == 'train' or mode == 'test':
+                    img_path, label_path = line.split()
+                    img_path = os.path.join(settings.data_dir, img_path)
+                    label_path = os.path.join(settings.data_dir, label_path)
+                elif mode == 'infer':
+                    img_path = os.path.join(settings.data_dir, line)
+
+                img = Image.open(img_path)
+                img_width, img_height = img.size
+                img = np.array(img)
+
+                # layout: label | xmin | ymin | xmax | ymax | difficult
+                if mode == 'train' or mode == 'test':
+                    bbox_labels = []
+                    root = xml.etree.ElementTree.parse(label_path).getroot()
+                    for object in root.findall('object'):
+                        bbox_sample = []
+                        # start from 1
+                        bbox_sample.append(
+                            float(
+                                settings.label_list.index(
+                                    object.find('name').text)))
+                        bbox = object.find('bndbox')
+                        difficult = float(object.find('difficult').text)
+                        bbox_sample.append(
+                            float(bbox.find('xmin').text) / img_width)
+                        bbox_sample.append(
+                            float(bbox.find('ymin').text) / img_height)
+                        bbox_sample.append(
+                            float(bbox.find('xmax').text) / img_width)
+                        bbox_sample.append(
+                            float(bbox.find('ymax').text) / img_height)
+                        bbox_sample.append(difficult)
+                        bbox_labels.append(bbox_sample)
+
+                    sample_labels = bbox_labels
+                    if mode == 'train':
+                        batch_sampler = []
+                        # hard-code here
+                        batch_sampler.append(
+                            image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0,
+                                               0.0))
+                        batch_sampler.append(
+                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1,
+                                               0.0))
+                        batch_sampler.append(
+                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3,
+                                               0.0))
+                        batch_sampler.append(
+                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5,
+                                               0.0))
+                        batch_sampler.append(
+                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7,
+                                               0.0))
+                        batch_sampler.append(
+                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9,
+                                               0.0))
+                        batch_sampler.append(
+                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0,
+                                               1.0))
+                        """ random crop """
+                        sampled_bbox = image_util.generate_batch_samples(
+                            batch_sampler, bbox_labels, img_width, img_height)
+
+                        if len(sampled_bbox) > 0:
+                            idx = int(random.uniform(0, len(sampled_bbox)))
+                            img, sample_labels = image_util.crop_image(
+                                img, bbox_labels, sampled_bbox[idx], img_width,
+                                img_height)
+
+                img = Image.fromarray(img)
+                img = img.resize((settings.resize_w, settings.resize_h),
+                                 Image.ANTIALIAS)
+                img = np.array(img)
+
+                if mode == 'train':
+                    mirror = int(random.uniform(0, 2))
+                    if mirror == 1:
+                        img = img[:, ::-1, :]
+                        for i in xrange(len(sample_labels)):
+                            tmp = sample_labels[i][1]
+                            sample_labels[i][1] = 1 - sample_labels[i][3]
+                            sample_labels[i][3] = 1 - tmp
+
+                if len(img.shape) == 3:
+                    img = np.swapaxes(img, 1, 2)
+                    img = np.swapaxes(img, 1, 0)
+
+                img = img[[2, 1, 0], :, :]
+                img = img.astype('float32')
+                img -= settings.img_mean
+                img = img.flatten()
+                img = img * 0.007843
+
+                sample_labels = np.array(sample_labels)
+                if mode == 'train' or mode == 'test':
+                    if mode == 'train' and len(sample_labels) == 0: continue
+                    yield img.astype(
+                        'float32'
+                    ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
+                        'int32'), sample_labels[:, -1].astype('int32')
+                elif mode == 'infer':
+                    yield img.astype('float32')
+
+    return reader
+
+
+def train(settings, file_list, shuffle=True):
+    return _reader_creator(settings, file_list, 'train', shuffle)
+
+
+def test(settings, file_list):
+    return _reader_creator(settings, file_list, 'test', False)
+
+
+def infer(settings, file_list):
+    return _reader_creator(settings, file_list, 'infer', False)
diff --git a/fluid/object_detection/coco_train.py b/fluid/object_detection/coco_train.py
new file mode 100644
index 0000000000..949efdcbdb
--- /dev/null
+++ b/fluid/object_detection/coco_train.py
@@ -0,0 +1,112 @@
+import paddle.v2 as paddle
+import paddle.fluid as fluid
+import os
+import coco_reader as reader
+import numpy as np
+import load_model as load_model
+from mobilenet_ssd import mobile_net
+
+
+def train(train_file_list,
+          val_file_list,
+          data_args,
+          learning_rate,
+          batch_size,
+          num_passes,
+          model_save_dir='model',
+          init_model_path=None):
+    image_shape = [3, data_args.resize_h, data_args.resize_w]
+
+    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
+    gt_box = fluid.layers.data(
+        name='gt_box', shape=[4], dtype='float32', lod_level=1)
+    gt_label = fluid.layers.data(
+        name='gt_label', shape=[1], dtype='int32', lod_level=1)
+    difficult = fluid.layers.data(
+        name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
+
+    mbox_locs, mbox_confs, box, box_var = mobile_net(image, image_shape)
+    nmsed_out = fluid.layers.detection_output(
+        mbox_locs, mbox_confs, box, box_var, nms_threshold=0.45)
+    loss_vec = fluid.layers.ssd_loss(mbox_locs, mbox_confs, gt_box, gt_label,
+                                     box, box_var)
+    loss = fluid.layers.nn.reduce_sum(loss_vec)
+
+    map_eval = None
+    test_program = fluid.default_main_program().clone(for_test=True)
+    with fluid.program_guard(test_program):
+        map_eval = fluid.evaluator.DetectionMAP(
+            nmsed_out,
+            gt_label,
+            gt_box,
+            difficult,
+            21,
+            overlap_threshold=0.5,
+            evaluate_difficult=False,
+            ap_version='11point')
+
+    optimizer = fluid.optimizer.Momentum(
+        learning_rate=fluid.layers.exponential_decay(
+            learning_rate=learning_rate,
+            decay_steps=40000,
+            decay_rate=0.1,
+            staircase=True),
+        momentum=0.9,
+        regularization=fluid.regularizer.L2Decay(0.00005), )
+
+    optimizer.minimize(loss)
+
+    place = fluid.CUDAPlace(0)
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+
+    load_model.load_paddlev1_vars(place)
+    train_reader = paddle.batch(
+        reader.train(data_args, train_file_list), batch_size=batch_size)
+    test_reader = paddle.batch(
+        reader.test(data_args, val_file_list), batch_size=batch_size)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=[image, gt_box, gt_label, difficult])
+
+    #print 'test_program ', test_program
+    def test(pass_id):
+        _, accum_map = map_eval.get_map_var()
+        map_eval.reset(exe)
+        test_map = None
+        for _, data in enumerate(test_reader()):
+            test_map = exe.run(test_program,
+                               feed=feeder.feed(data),
+                               fetch_list=[accum_map])
+        print("Test {0}, map {1}".format(pass_id, test_map[0]))
+
+    #print 'main_program ', fluid.default_main_program()
+    for pass_id in range(num_passes):
+        for batch_id, data in enumerate(train_reader()):
+            loss_v = exe.run(fluid.default_main_program(),
+                             feed=feeder.feed(data),
+                             fetch_list=[loss])
+            print("Pass {0}, batch {1}, loss {2}"
+                  .format(pass_id, batch_id, loss_v[0]))
+        test(pass_id)
+
+        if pass_id % 10 == 0:
+            model_path = os.path.join(model_save_dir, str(pass_id))
+            print 'save models to %s' % (model_path)
+            fluid.io.save_inference_model(model_path, ['image'], [nmsed_out],
+                                          exe)
+
+
+if __name__ == '__main__':
+    data_args = reader.Settings(
+        data_dir='./data',
+        label_file='label_list',
+        resize_h=300,
+        resize_w=300,
+        mean_value=[127.5, 127.5, 127.5])
+    train(
+        train_file_list='./data/trainval.txt',
+        val_file_list='./data/test.txt',
+        data_args=data_args,
+        learning_rate=0.001,
+        batch_size=4,
+        num_passes=300)
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 9d2dbadccf..dc5bb151c3 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -108,5 +108,5 @@ def test(pass_id):
         val_file_list='./data/test.txt',
         data_args=data_args,
         learning_rate=0.001,
-        batch_size=32,
+        batch_size=4,
         num_passes=300)

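A note on the preprocessing in coco_reader.py above: each image is converted to CHW BGR, the channel mean is subtracted, and the result is scaled by 0.007843 (about 1/127.5), which maps 8-bit pixel values into roughly [-1, 1]. A standalone sketch of that step, assuming NumPy only (the helper name is illustrative, not part of the patch):

import numpy as np

def normalize_chw(img_chw, mean=(127.5, 127.5, 127.5), scale=0.007843):
    # Subtract the per-channel mean and rescale, as coco_reader.py does
    # before flattening the CHW image into a 1-D float32 vector.
    mean = np.array(mean, dtype='float32')[:, np.newaxis, np.newaxis]
    return (img_chw.astype('float32') - mean) * scale

example = np.array([[[0]], [[127]], [[255]]], dtype='uint8')  # 3x1x1 CHW
print(normalize_chw(example).flatten())  # approx. [-1.0, -0.0039, 1.0]
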
From cd568b6bcc529e09670d931380d4a9d17d41c2e7 Mon Sep 17 00:00:00 2001
From: Bu Xingyuan <buxingyuan@baidu.com>
Date: Mon, 26 Mar 2018 10:51:51 +0800
Subject: [PATCH 02/40] complete coco_reader.py & coco_train.py

---
 fluid/object_detection/coco_reader.py | 241 +++++++++++++-------------
 fluid/object_detection/coco_train.py  |  13 +-
 2 files changed, 128 insertions(+), 126 deletions(-)

diff --git a/fluid/object_detection/coco_reader.py b/fluid/object_detection/coco_reader.py
index 4e680c2999..16bc75635f 100644
--- a/fluid/object_detection/coco_reader.py
+++ b/fluid/object_detection/coco_reader.py
@@ -19,16 +19,15 @@
 import numpy as np
 import xml.etree.ElementTree
 import os
+import copy
 
+# cocoapi 
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
 
 class Settings(object):
-    def __init__(self, data_dir, label_file, resize_h, resize_w, mean_value):
+    def __init__(self, data_dir, resize_h, resize_w, mean_value):
         self._data_dir = data_dir
-        self._label_list = []
-        label_fpath = os.path.join(data_dir, label_file)
-        for line in open(label_fpath):
-            self._label_list.append(line.strip())
-
         self._resize_height = resize_h
         self._resize_width = resize_w
         self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(
@@ -38,9 +37,9 @@ def __init__(self, data_dir, label_file, resize_h, resize_w, mean_value):
     def data_dir(self):
         return self._data_dir
 
-    @property
-    def label_list(self):
-        return self._label_list
+    @data_dir.setter
+    def data_dir(self, data_dir):
+        self._data_dir = data_dir 
 
     @property
     def resize_h(self):
@@ -57,125 +56,129 @@ def img_mean(self):
 
 def _reader_creator(settings, file_list, mode, shuffle):
     def reader():
-        with open(file_list) as flist:
-            lines = [line.strip() for line in flist]
-            if shuffle:
-                random.shuffle(lines)
-            for line in lines:
-                if mode == 'train' or mode == 'test':
-                    img_path, label_path = line.split()
-                    img_path = os.path.join(settings.data_dir, img_path)
-                    label_path = os.path.join(settings.data_dir, label_path)
-                elif mode == 'infer':
-                    img_path = os.path.join(settings.data_dir, line)
-
-                img = Image.open(img_path)
-                img_width, img_height = img.size
-                img = np.array(img)
-
-                # layout: label | xmin | ymin | xmax | ymax | difficult
-                if mode == 'train' or mode == 'test':
-                    bbox_labels = []
-                    root = xml.etree.ElementTree.parse(label_path).getroot()
-                    for object in root.findall('object'):
-                        bbox_sample = []
-                        # start from 1
-                        bbox_sample.append(
-                            float(
-                                settings.label_list.index(
-                                    object.find('name').text)))
-                        bbox = object.find('bndbox')
-                        difficult = float(object.find('difficult').text)
-                        bbox_sample.append(
-                            float(bbox.find('xmin').text) / img_width)
-                        bbox_sample.append(
-                            float(bbox.find('ymin').text) / img_height)
-                        bbox_sample.append(
-                            float(bbox.find('xmax').text) / img_width)
-                        bbox_sample.append(
-                            float(bbox.find('ymax').text) / img_height)
-                        bbox_sample.append(difficult)
-                        bbox_labels.append(bbox_sample)
-
-                    sample_labels = bbox_labels
-                    if mode == 'train':
-                        batch_sampler = []
-                        # hard-code here
-                        batch_sampler.append(
-                            image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0,
-                                               1.0))
-                        """ random crop """
-                        sampled_bbox = image_util.generate_batch_samples(
-                            batch_sampler, bbox_labels, img_width, img_height)
-
-                        if len(sampled_bbox) > 0:
-                            idx = int(random.uniform(0, len(sampled_bbox)))
-                            img, sample_labels = image_util.crop_image(
-                                img, bbox_labels, sampled_bbox[idx], img_width,
-                                img_height)
-
-                img = Image.fromarray(img)
-                img = img.resize((settings.resize_w, settings.resize_h),
-                                 Image.ANTIALIAS)
-                img = np.array(img)
-
+        coco=COCO(file_list)
+        image_ids = coco.getImgIds()
+        images = coco.loadImgs(image_ids)
+        if shuffle:
+            random.shuffle(images)
+        for image in images[:1]:
+            image_name = image['file_name']
+            image_path = os.path.join(settings.data_dir, image_name)
+            img = Image.open(image_path)
+            img_width, img_height = img.size
+            img = np.array(img)
+
+            if mode == 'train' or mode == 'val':
+                # layout: category_id | xmin | ymin | xmax | ymax | iscrowd | origin_coco_bbox | segmentation | area | image_id | annotation_id
+                bbox_labels = []
+                annIds = coco.getAnnIds(imgIds=image['id'])
+                anns = coco.loadAnns(annIds)
+                for ann in anns:
+                    bbox_sample = []
+                    # start from 1
+                    bbox_sample.append(ann['category_id'])
+                    bbox = ann['bbox']
+                    xmin, ymin, w, h = bbox
+                    xmax = xmin + w
+                    ymax = ymin + h
+                    bbox_sample.append(float(xmin) / img_width)
+                    bbox_sample.append(float(ymin) / img_height)
+                    bbox_sample.append(float(xmax) / img_width)
+                    bbox_sample.append(float(ymax) / img_height)
+                    bbox_sample.append(ann['iscrowd'])
+                    bbox_sample.append(ann['bbox'])
+                    bbox_sample.append(ann['segmentation'])
+                    bbox_sample.append(ann['area'])
+                    bbox_sample.append(ann['image_id'])
+                    bbox_sample.append(ann['id'])
+                    bbox_labels.append(bbox_sample)
+    
+                sample_labels = bbox_labels
                 if mode == 'train':
-                    mirror = int(random.uniform(0, 2))
-                    if mirror == 1:
-                        img = img[:, ::-1, :]
-                        for i in xrange(len(sample_labels)):
-                            tmp = sample_labels[i][1]
-                            sample_labels[i][1] = 1 - sample_labels[i][3]
-                            sample_labels[i][3] = 1 - tmp
-
-                if len(img.shape) == 3:
-                    img = np.swapaxes(img, 1, 2)
-                    img = np.swapaxes(img, 1, 0)
-
-                img = img[[2, 1, 0], :, :]
-                img = img.astype('float32')
-                img -= settings.img_mean
-                img = img.flatten()
-                img = img * 0.007843
-
-                sample_labels = np.array(sample_labels)
-                if mode == 'train' or mode == 'test':
-                    if mode == 'train' and len(sample_labels) == 0: continue
-                    yield img.astype(
-                        'float32'
-                    ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
-                        'int32'), sample_labels[:, -1].astype('int32')
-                elif mode == 'infer':
-                    yield img.astype('float32')
+                    batch_sampler = []
+                    # hard-code here
+                    batch_sampler.append(
+                        image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0,
+                                           0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1,
+                                           0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3,
+                                           0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5,
+                                           0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7,
+                                           0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9,
+                                           0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0,
+                                           1.0))
+                    """ random crop """
+                    sampled_bbox = image_util.generate_batch_samples(
+                        batch_sampler, bbox_labels, img_width, img_height)
+    
+                    if len(sampled_bbox) > 0:
+                        idx = int(random.uniform(0, len(sampled_bbox)))
+                        img, sample_labels = image_util.crop_image(
+                            img, bbox_labels, sampled_bbox[idx], img_width,
+                            img_height)
+
+            img = Image.fromarray(img)
+            img = img.resize((settings.resize_w, settings.resize_h),
+                             Image.ANTIALIAS)
+            img = np.array(img)
+
+            if mode == 'train':
+                mirror = int(random.uniform(0, 2))
+                if mirror == 1:
+                    img = img[:, ::-1, :]
+                    for i in xrange(len(sample_labels)):
+                        tmp = sample_labels[i][1]
+                        sample_labels[i][1] = 1 - sample_labels[i][3]
+                        sample_labels[i][3] = 1 - tmp
+
+            if len(img.shape) == 3:
+                img = np.swapaxes(img, 1, 2)
+                img = np.swapaxes(img, 1, 0)
+
+            img = img[[2, 1, 0], :, :]
+            img = img.astype('float32')
+            img -= settings.img_mean
+            img = img.flatten()
+            img = img * 0.007843
+
+            sample_labels = np.array(sample_labels)
+            if mode == 'train' or mode == 'val':
+                if mode == 'train' and len(sample_labels) == 0: continue
+                yield img.astype(
+                    'float32'
+                ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
+                    'int32'), sample_labels[:, -1].astype('int32')
+            elif mode == 'infer':
+                yield img.astype('float32')
 
     return reader
 
 
 def train(settings, file_list, shuffle=True):
-    return _reader_creator(settings, file_list, 'train', shuffle)
+    train_settings = copy.copy(settings)
+    train_settings.data_dir=os.path.join(settings.data_dir, "coco_train2014")
+    return _reader_creator(train_settings, file_list, 'train', shuffle)
 
+def val(settings, file_list):
+    val_settings = copy.copy(settings)
+    val_settings.data_dir=os.path.join(settings.data_dir, "coco_val2014")
+    return _reader_creator(val_settings, file_list, 'val', False)
 
 def test(settings, file_list):
-    return _reader_creator(settings, file_list, 'test', False)
-
+    test_settings = copy.copy(settings)
+    test_settings.data_dir=os.path.join(settings.data_dir, "coco_test2014")
+    return _reader_creator(test_settings, file_list, 'test', False)
 
 def infer(settings, file_list):
     return _reader_creator(settings, file_list, 'infer', False)
diff --git a/fluid/object_detection/coco_train.py b/fluid/object_detection/coco_train.py
index 949efdcbdb..fc592b34b0 100644
--- a/fluid/object_detection/coco_train.py
+++ b/fluid/object_detection/coco_train.py
@@ -64,7 +64,7 @@ def train(train_file_list,
     train_reader = paddle.batch(
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
-        reader.test(data_args, val_file_list), batch_size=batch_size)
+        reader.val(data_args, val_file_list), batch_size=batch_size)
     feeder = fluid.DataFeeder(
         place=place, feed_list=[image, gt_box, gt_label, difficult])
 
@@ -98,15 +98,14 @@ def test(pass_id):
 
 if __name__ == '__main__':
     data_args = reader.Settings(
-        data_dir='./data',
-        label_file='label_list',
+        data_dir='./data/coco',
         resize_h=300,
         resize_w=300,
         mean_value=[127.5, 127.5, 127.5])
     train(
-        train_file_list='./data/trainval.txt',
-        val_file_list='./data/test.txt',
+        train_file_list='./data/coco/annotations/instances_train2014.json',
+        val_file_list='./data/coco/annotations/instances_val2014.json',
         data_args=data_args,
         learning_rate=0.001,
-        batch_size=4,
-        num_passes=300)
+        batch_size=1,
+        num_passes=1)

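The main data-format change in this patch: COCO annotations store bbox as absolute-pixel [x, y, w, h], while ssd_loss expects normalized [xmin, ymin, xmax, ymax]. The conversion done inline in the reader boils down to the following sketch (the function name is illustrative, not part of the patch):

def coco_bbox_to_normalized_corners(bbox, img_width, img_height):
    # Convert a COCO [x, y, w, h] box in pixels to normalized corners.
    xmin, ymin, w, h = bbox
    return [xmin / float(img_width), ymin / float(img_height),
            (xmin + w) / float(img_width), (ymin + h) / float(img_height)]

# A 100x50 box at (10, 20) in a 500x400 image:
print(coco_bbox_to_normalized_corners([10, 20, 100, 50], 500, 400))
# [0.02, 0.05, 0.22, 0.175]
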
From e037b8ed4e34b61e113169942a0ab2e49ceae496 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Mon, 26 Mar 2018 13:39:19 +0800
Subject: [PATCH 03/40] complete coco reader

---
 fluid/object_detection/coco_reader.py | 193 ++++++++++++++++++--------
 fluid/object_detection/coco_train.py  |  81 ++++++++---
 2 files changed, 192 insertions(+), 82 deletions(-)

diff --git a/fluid/object_detection/coco_reader.py b/fluid/object_detection/coco_reader.py
index 16bc75635f..c4663d6acb 100644
--- a/fluid/object_detection/coco_reader.py
+++ b/fluid/object_detection/coco_reader.py
@@ -25,13 +25,46 @@
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
 
+
 class Settings(object):
-    def __init__(self, data_dir, resize_h, resize_w, mean_value):
+    def __init__(self, dataset, data_dir, label_file, resize_h, resize_w,
+                 mean_value, apply_distort, apply_expand):
+        self._dataset = dataset
         self._data_dir = data_dir
+        if dataset == "pascalvoc":
+            self._label_list = []
+            label_fpath = os.path.join(data_dir, label_file)
+            for line in open(label_fpath):
+                self._label_list.append(line.strip())
+
+        self._apply_distort = apply_distort
+        self._apply_expand = apply_expand
         self._resize_height = resize_h
         self._resize_width = resize_w
         self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(
             'float32')
+        self._expand_prob = 0.5
+        self._expand_max_ratio = 4
+        self._hue_prob = 0.5
+        self._hue_delta = 18
+        self._contrast_prob = 0.5
+        self._contrast_delta = 0.5
+        self._saturation_prob = 0.5
+        self._saturation_delta = 0.5
+        self._brightness_prob = 0.5
+        self._brightness_delta = 0.125
+
+    @property
+    def dataset(self):
+        return self._dataset
+
+    @property
+    def apply_expand(self):
+        return self._apply_expand
+
+    @property
+    def apply_distort(self):
+        return self._apply_distort
 
     @property
     def data_dir(self):
@@ -39,7 +72,11 @@ def data_dir(self):
 
     @data_dir.setter
     def data_dir(self, data_dir):
-        self._data_dir = data_dir 
+        self._data_dir = data_dir
+
+    @property
+    def label_list(self):
+        return self._label_list
 
     @property
     def resize_h(self):
@@ -56,79 +93,111 @@ def img_mean(self):
 
 def _reader_creator(settings, file_list, mode, shuffle):
     def reader():
-        coco=COCO(file_list)
-        image_ids = coco.getImgIds()
-        images = coco.loadImgs(image_ids)
+        if settings.dataset == 'coco':
+            coco = COCO(file_list)
+            image_ids = coco.getImgIds()
+            images = coco.loadImgs(image_ids)
+        elif settings.dataset == 'pascalvoc':
+            flist = open(file_list)
+            lines = [line.strip() for line in flist]
         if shuffle:
             random.shuffle(images)
+
         for image in images[:1]:
-            image_name = image['file_name']
-            image_path = os.path.join(settings.data_dir, image_name)
+            if settings.dataset == 'coco':
+                image_name = image['file_name']
+                image_path = os.path.join(settings.data_dir, image_name)
+            elif settings.dataset == 'pascalvoc':
+                flist = open(file_list)
+                lines = [line.strip() for line in flist]
+
             img = Image.open(image_path)
             img_width, img_height = img.size
-            img = np.array(img)
 
-            if mode == 'train' or mode == 'val':
-                # layout: category_id | xmin | ymin | xmax | ymax | iscrowd | origin_coco_bbox | segmentation | area | image_id | annotation_id
-                bbox_labels = []
-                annIds = coco.getAnnIds(imgIds=image['id'])
-                anns = coco.loadAnns(annIds)
-                for ann in anns:
-                    bbox_sample = []
-                    # start from 1
-                    bbox_sample.append(ann['category_id'])
-                    bbox = ann['bbox']
-                    xmin, ymin, w, h = bbox
-                    xmax = xmin + w
-                    ymax = ymin + h
-                    bbox_sample.append(float(xmin) / img_width)
-                    bbox_sample.append(float(ymin) / img_height)
-                    bbox_sample.append(float(xmax) / img_width)
-                    bbox_sample.append(float(ymax) / img_height)
-                    bbox_sample.append(ann['iscrowd'])
-                    bbox_sample.append(ann['bbox'])
-                    bbox_sample.append(ann['segmentation'])
-                    bbox_sample.append(ann['area'])
-                    bbox_sample.append(ann['image_id'])
-                    bbox_sample.append(ann['id'])
-                    bbox_labels.append(bbox_sample)
-    
+            if mode == 'train' or mode == 'test':
+                if settings.dataset == 'coco':
+                    # layout: category_id | xmin | ymin | xmax | ymax | iscrowd | origin_coco_bbox | segmentation | area | image_id | annotation_id
+                    bbox_labels = []
+                    annIds = coco.getAnnIds(imgIds=image['id'])
+                    anns = coco.loadAnns(annIds)
+                    for ann in anns:
+                        bbox_sample = []
+                        # start from 1
+                        bbox_sample.append(ann['category_id'])
+                        bbox = ann['bbox']
+                        xmin, ymin, w, h = bbox
+                        xmax = xmin + w
+                        ymax = ymin + h
+                        bbox_sample.append(float(xmin) / img_width)
+                        bbox_sample.append(float(ymin) / img_height)
+                        bbox_sample.append(float(xmax) / img_width)
+                        bbox_sample.append(float(ymax) / img_height)
+                        bbox_sample.append(ann['iscrowd'])
+                        bbox_sample.append(ann['bbox'])
+                        bbox_sample.append(ann['segmentation'])
+                        bbox_sample.append(ann['area'])
+                        bbox_sample.append(ann['image_id'])
+                        bbox_sample.append(ann['id'])
+                        bbox_labels.append(bbox_sample)
+                elif settings.dataset == 'pascalvoc':
+                    # layout: label | xmin | ymin | xmax | ymax | difficult
+                    bbox_labels = []
+                    root = xml.etree.ElementTree.parse(label_path).getroot()
+                    for object in root.findall('object'):
+                        bbox_sample = []
+                        # start from 1
+                        bbox_sample.append(
+                            float(
+                                settings.label_list.index(
+                                    object.find('name').text)))
+                        bbox = object.find('bndbox')
+                        difficult = float(object.find('difficult').text)
+                        bbox_sample.append(
+                            float(bbox.find('xmin').text) / img_width)
+                        bbox_sample.append(
+                            float(bbox.find('ymin').text) / img_height)
+                        bbox_sample.append(
+                            float(bbox.find('xmax').text) / img_width)
+                        bbox_sample.append(
+                            float(bbox.find('ymax').text) / img_height)
+                        bbox_sample.append(difficult)
+                        bbox_labels.append(bbox_sample)
+
                 sample_labels = bbox_labels
                 if mode == 'train':
+                    if settings._apply_distort:
+                        img = image_util.distort_image(img, settings)
+                    if settings._apply_expand:
+                        img, bbox_labels = image_util.expand_image(
+                            img, bbox_labels, img_width, img_height, settings)
                     batch_sampler = []
                     # hard-code here
                     batch_sampler.append(
-                        image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0,
-                                           0.0))
+                        image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
                     batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1,
-                                           0.0))
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))
                     batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3,
-                                           0.0))
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))
                     batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5,
-                                           0.0))
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))
                     batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7,
-                                           0.0))
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))
                     batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9,
-                                           0.0))
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))
                     batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0,
-                                           1.0))
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
                     """ random crop """
                     sampled_bbox = image_util.generate_batch_samples(
                         batch_sampler, bbox_labels, img_width, img_height)
-    
+
+                    img = np.array(img)
                     if len(sampled_bbox) > 0:
                         idx = int(random.uniform(0, len(sampled_bbox)))
                         img, sample_labels = image_util.crop_image(
                             img, bbox_labels, sampled_bbox[idx], img_width,
                             img_height)
 
-            img = Image.fromarray(img)
+                    img = Image.fromarray(img)
             img = img.resize((settings.resize_w, settings.resize_h),
                              Image.ANTIALIAS)
             img = np.array(img)
@@ -153,7 +222,7 @@ def reader():
             img = img * 0.007843
 
             sample_labels = np.array(sample_labels)
-            if mode == 'train' or mode == 'val':
+            if mode == 'train' or mode == 'test':
                 if mode == 'train' and len(sample_labels) == 0: continue
                 yield img.astype(
                     'float32'
@@ -166,19 +235,23 @@ def reader():
 
 
 def train(settings, file_list, shuffle=True):
-    train_settings = copy.copy(settings)
-    train_settings.data_dir=os.path.join(settings.data_dir, "coco_train2014")
-    return _reader_creator(train_settings, file_list, 'train', shuffle)
+    if settings.dataset == 'coco':
+        train_settings = copy.copy(settings)
+        train_settings.data_dir = os.path.join(settings.data_dir,
+                                               "coco_train2014")
+        return _reader_creator(train_settings, file_list, 'train', shuffle)
+    elif settings.dataset == 'pascalvoc':
+        return _reader_creator(settings, file_list, 'train', shuffle)
 
-def val(settings, file_list):
-    val_settings = copy.copy(settings)
-    val_settings.data_dir=os.path.join(settings.data_dir, "coco_val2014")
-    return _reader_creator(val_settings, file_list, 'val', False)
 
 def test(settings, file_list):
-    test_settings = copy.copy(settings)
-    test_settings.data_dir=os.path.join(settings.data_dir, "coco_test2014")
-    return _reader_creator(test_settings, file_list, 'test', False)
+    if settings.dataset == 'coco':
+        test_settings = copy.copy(settings)
+        test_settings.data_dir = os.path.join(settings.data_dir, "coco_val2014")
+        return _reader_creator(test_settings, file_list, 'test', False)
+    elif settings.dataset == 'pascalvoc':
+        return _reader_creator(settings, file_list, 'test', False)
+
 
 def infer(settings, file_list):
     return _reader_creator(settings, file_list, 'infer', False)
diff --git a/fluid/object_detection/coco_train.py b/fluid/object_detection/coco_train.py
index fc592b34b0..e27fe0f47b 100644
--- a/fluid/object_detection/coco_train.py
+++ b/fluid/object_detection/coco_train.py
@@ -1,13 +1,24 @@
 import paddle.v2 as paddle
 import paddle.fluid as fluid
-import os
 import coco_reader as reader
-import numpy as np
 import load_model as load_model
 from mobilenet_ssd import mobile_net
+from utility import add_arguments, print_arguments
+import os
+import numpy as np
+import argparse
+import functools
+
+parser = argparse.ArgumentParser(description=__doc__)
+add_arg = functools.partial(add_arguments, argparser=parser)
+# yapf: disable
+add_arg('parallel',    bool,   False,     "Whether use parallel training.")
+add_arg('use_gpu',     bool,   True,     "Whether use GPU.")
+# yapf: enable
 
 
-def train(train_file_list,
+def train(args,
+          train_file_list,
           val_file_list,
           data_args,
           learning_rate,
@@ -25,16 +36,37 @@ def train(train_file_list,
     difficult = fluid.layers.data(
         name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
 
-    mbox_locs, mbox_confs, box, box_var = mobile_net(image, image_shape)
-    nmsed_out = fluid.layers.detection_output(
-        mbox_locs, mbox_confs, box, box_var, nms_threshold=0.45)
-    loss_vec = fluid.layers.ssd_loss(mbox_locs, mbox_confs, gt_box, gt_label,
+    if args.parallel:
+        places = fluid.layers.get_places()
+        pd = fluid.layers.ParallelDo(places)
+        with pd.do():
+            image_ = pd.read_input(image)
+            gt_box_ = pd.read_input(gt_box)
+            gt_label_ = pd.read_input(gt_label)
+            difficult_ = pd.read_input(difficult)
+            locs, confs, box, box_var = mobile_net(image_, image_shape)
+            loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_,
+                                         box, box_var)
+            pd.write_output(loss)
+            pd.write_output(locs)
+            pd.write_output(confs)
+            pd.write_output(box)
+            pd.write_output(box_var)
+
+        loss, locs, confs, box, box_var = pd()
+        loss = fluid.layers.reduce_sum(loss)
+    else:
+        locs, confs, box, box_var = mobile_net(image, image_shape)
+        nmsed_out = fluid.layers.detection_output(
+            locs, confs, box, box_var, nms_threshold=0.45)
+        loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label,
                                      box, box_var)
-    loss = fluid.layers.nn.reduce_sum(loss_vec)
+        loss = fluid.layers.reduce_sum(loss)
 
-    map_eval = None
     test_program = fluid.default_main_program().clone(for_test=True)
     with fluid.program_guard(test_program):
+        nmsed_out = fluid.layers.detection_output(
+            locs, confs, box, box_var, nms_threshold=0.45)
         map_eval = fluid.evaluator.DetectionMAP(
             nmsed_out,
             gt_label,
@@ -45,26 +77,24 @@ def train(train_file_list,
             evaluate_difficult=False,
             ap_version='11point')
 
-    optimizer = fluid.optimizer.Momentum(
-        learning_rate=fluid.layers.exponential_decay(
-            learning_rate=learning_rate,
-            decay_steps=40000,
-            decay_rate=0.1,
-            staircase=True),
-        momentum=0.9,
+    boundaries = [40000, 60000]
+    values = [0.001, 0.0005, 0.00025]
+    optimizer = fluid.optimizer.RMSProp(
+        learning_rate=fluid.layers.piecewise_decay(boundaries, values),
         regularization=fluid.regularizer.L2Decay(0.00005), )
 
     optimizer.minimize(loss)
 
-    place = fluid.CUDAPlace(0)
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
 
-    load_model.load_paddlev1_vars(place)
+    load_model.load_and_set_vars(place)
+    #load_model.load_paddlev1_vars(place)
     train_reader = paddle.batch(
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
-        reader.val(data_args, val_file_list), batch_size=batch_size)
+        reader.test(data_args, val_file_list), batch_size=batch_size)
     feeder = fluid.DataFeeder(
         place=place, feed_list=[image, gt_box, gt_label, difficult])
 
@@ -85,8 +115,9 @@ def test(pass_id):
             loss_v = exe.run(fluid.default_main_program(),
                              feed=feeder.feed(data),
                              fetch_list=[loss])
-            print("Pass {0}, batch {1}, loss {2}"
-                  .format(pass_id, batch_id, loss_v[0]))
+            if batch_id % 20 == 0:
+                print("Pass {0}, batch {1}, loss {2}"
+                      .format(pass_id, batch_id, loss_v[0]))
         test(pass_id)
 
         if pass_id % 10 == 0:
@@ -97,12 +128,18 @@ def test(pass_id):
 
 
 if __name__ == '__main__':
+    args = parser.parse_args()
+    print_arguments(args)
     data_args = reader.Settings(
+        dataset='coco', # coco or pascalvoc
         data_dir='./data/coco',
+        label_file='label_list',
+        apply_distort=True,
+        apply_expand=True,
         resize_h=300,
         resize_w=300,
         mean_value=[127.5, 127.5, 127.5])
-    train(
+    train(args,
         train_file_list='./data/coco/annotations/instances_train2014.json',
         val_file_list='./data/coco/annotations/instances_val2014.json',
         data_args=data_args,

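The optimizer change above swaps exponential decay for a piecewise schedule with boundaries [40000, 60000] and values [0.001, 0.0005, 0.00025]. As a plain-Python sketch (illustrative only, not the fluid API), the schedule resolves like this:

def piecewise_lr(step, boundaries=(40000, 60000),
                 values=(0.001, 0.0005, 0.00025)):
    # values[i] applies while step < boundaries[i]; values[-1] afterwards.
    for boundary, value in zip(boundaries, values):
        if step < boundary:
            return value
    return values[-1]

assert piecewise_lr(0) == 0.001        # first 40k steps
assert piecewise_lr(40000) == 0.0005   # steps 40k-60k
assert piecewise_lr(60000) == 0.00025  # after 60k steps
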
From 1e2b63177ee4ba462b81c545a0c9319207d71072 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Mon, 26 Mar 2018 16:12:27 +0800
Subject: [PATCH 04/40] rename file

---
 fluid/object_detection/coco_reader.py | 257 --------------------------
 fluid/object_detection/coco_train.py  | 148 ---------------
 fluid/object_detection/reader.py      | 245 ++++++++++++++----------
 fluid/object_detection/train.py       |  18 +-
 4 files changed, 160 insertions(+), 508 deletions(-)
 delete mode 100644 fluid/object_detection/coco_reader.py
 delete mode 100644 fluid/object_detection/coco_train.py

diff --git a/fluid/object_detection/coco_reader.py b/fluid/object_detection/coco_reader.py
deleted file mode 100644
index c4663d6acb..0000000000
--- a/fluid/object_detection/coco_reader.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import image_util
-from paddle.utils.image_util import *
-import random
-from PIL import Image
-import numpy as np
-import xml.etree.ElementTree
-import os
-import copy
-
-# cocoapi 
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval
-
-
-class Settings(object):
-    def __init__(self, dataset, data_dir, label_file, resize_h, resize_w,
-                 mean_value, apply_distort, apply_expand):
-        self._dataset = dataset
-        self._data_dir = data_dir
-        if dataset == "pascalvoc":
-            self._label_list = []
-            label_fpath = os.path.join(data_dir, label_file)
-            for line in open(label_fpath):
-                self._label_list.append(line.strip())
-
-        self._apply_distort = apply_distort
-        self._apply_expand = apply_expand
-        self._resize_height = resize_h
-        self._resize_width = resize_w
-        self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(
-            'float32')
-        self._expand_prob = 0.5
-        self._expand_max_ratio = 4
-        self._hue_prob = 0.5
-        self._hue_delta = 18
-        self._contrast_prob = 0.5
-        self._contrast_delta = 0.5
-        self._saturation_prob = 0.5
-        self._saturation_delta = 0.5
-        self._brightness_prob = 0.5
-        self._brightness_delta = 0.125
-
-    @property
-    def dataset(self):
-        return self._dataset
-
-    @property
-    def apply_expand(self):
-        return self._apply_expand
-
-    @property
-    def apply_distort(self):
-        return self._apply_distort
-
-    @property
-    def data_dir(self):
-        return self._data_dir
-
-    @data_dir.setter
-    def data_dir(self, data_dir):
-        self._data_dir = data_dir
-
-    @property
-    def label_list(self):
-        return self._label_list
-
-    @property
-    def resize_h(self):
-        return self._resize_height
-
-    @property
-    def resize_w(self):
-        return self._resize_width
-
-    @property
-    def img_mean(self):
-        return self._img_mean
-
-
-def _reader_creator(settings, file_list, mode, shuffle):
-    def reader():
-        if settings.dataset == 'coco':
-            coco = COCO(file_list)
-            image_ids = coco.getImgIds()
-            images = coco.loadImgs(image_ids)
-        elif settings.dataset == 'pascalvoc':
-            flist = open(file_list)
-            lines = [line.strip() for line in flist]
-        if shuffle:
-            random.shuffle(images)
-
-        for image in images[:1]:
-            if settings.dataset == 'coco':
-                image_name = image['file_name']
-                image_path = os.path.join(settings.data_dir, image_name)
-            elif settings.dataset == 'pascalvoc':
-                flist = open(file_list)
-                lines = [line.strip() for line in flist]
-
-            img = Image.open(image_path)
-            img_width, img_height = img.size
-
-            if mode == 'train' or mode == 'test':
-                if settings.dataset == 'coco':
-                    # layout: category_id | xmin | ymin | xmax | ymax | iscrowd | origin_coco_bbox | segmentation | area | image_id | annotation_id
-                    bbox_labels = []
-                    annIds = coco.getAnnIds(imgIds=image['id'])
-                    anns = coco.loadAnns(annIds)
-                    for ann in anns:
-                        bbox_sample = []
-                        # start from 1
-                        bbox_sample.append(ann['category_id'])
-                        bbox = ann['bbox']
-                        xmin, ymin, w, h = bbox
-                        xmax = xmin + w
-                        ymax = ymin + h
-                        bbox_sample.append(float(xmin) / img_width)
-                        bbox_sample.append(float(ymin) / img_height)
-                        bbox_sample.append(float(xmax) / img_width)
-                        bbox_sample.append(float(ymax) / img_height)
-                        bbox_sample.append(ann['iscrowd'])
-                        bbox_sample.append(ann['bbox'])
-                        bbox_sample.append(ann['segmentation'])
-                        bbox_sample.append(ann['area'])
-                        bbox_sample.append(ann['image_id'])
-                        bbox_sample.append(ann['id'])
-                        bbox_labels.append(bbox_sample)
-                elif settings.dataset == 'pascalvoc':
-                    # layout: label | xmin | ymin | xmax | ymax | difficult
-                    bbox_labels = []
-                    root = xml.etree.ElementTree.parse(label_path).getroot()
-                    for object in root.findall('object'):
-                        bbox_sample = []
-                        # start from 1
-                        bbox_sample.append(
-                            float(
-                                settings.label_list.index(
-                                    object.find('name').text)))
-                        bbox = object.find('bndbox')
-                        difficult = float(object.find('difficult').text)
-                        bbox_sample.append(
-                            float(bbox.find('xmin').text) / img_width)
-                        bbox_sample.append(
-                            float(bbox.find('ymin').text) / img_height)
-                        bbox_sample.append(
-                            float(bbox.find('xmax').text) / img_width)
-                        bbox_sample.append(
-                            float(bbox.find('ymax').text) / img_height)
-                        bbox_sample.append(difficult)
-                        bbox_labels.append(bbox_sample)
-
-                sample_labels = bbox_labels
-                if mode == 'train':
-                    if settings._apply_distort:
-                        img = image_util.distort_image(img, settings)
-                    if settings._apply_expand:
-                        img, bbox_labels = image_util.expand_image(
-                            img, bbox_labels, img_width, img_height, settings)
-                    batch_sampler = []
-                    # hard-code here
-                    batch_sampler.append(
-                        image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
-                    """ random crop """
-                    sampled_bbox = image_util.generate_batch_samples(
-                        batch_sampler, bbox_labels, img_width, img_height)
-
-                    img = np.array(img)
-                    if len(sampled_bbox) > 0:
-                        idx = int(random.uniform(0, len(sampled_bbox)))
-                        img, sample_labels = image_util.crop_image(
-                            img, bbox_labels, sampled_bbox[idx], img_width,
-                            img_height)
-
-                    img = Image.fromarray(img)
-            img = img.resize((settings.resize_w, settings.resize_h),
-                             Image.ANTIALIAS)
-            img = np.array(img)
-
-            if mode == 'train':
-                mirror = int(random.uniform(0, 2))
-                if mirror == 1:
-                    img = img[:, ::-1, :]
-                    for i in xrange(len(sample_labels)):
-                        tmp = sample_labels[i][1]
-                        sample_labels[i][1] = 1 - sample_labels[i][3]
-                        sample_labels[i][3] = 1 - tmp
-
-            if len(img.shape) == 3:
-                img = np.swapaxes(img, 1, 2)
-                img = np.swapaxes(img, 1, 0)
-
-            img = img[[2, 1, 0], :, :]
-            img = img.astype('float32')
-            img -= settings.img_mean
-            img = img.flatten()
-            img = img * 0.007843
-
-            sample_labels = np.array(sample_labels)
-            if mode == 'train' or mode == 'test':
-                if mode == 'train' and len(sample_labels) == 0: continue
-                yield img.astype(
-                    'float32'
-                ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
-                    'int32'), sample_labels[:, -1].astype('int32')
-            elif mode == 'infer':
-                yield img.astype('float32')
-
-    return reader
-
-
-def train(settings, file_list, shuffle=True):
-    if settings.dataset == 'coco':
-        train_settings = copy.copy(settings)
-        train_settings.data_dir = os.path.join(settings.data_dir,
-                                               "coco_train2014")
-        return _reader_creator(train_settings, file_list, 'train', shuffle)
-    elif settings.dataset == 'pascalvoc':
-        return _reader_creator(settings, file_list, 'train', shuffle)
-
-
-def test(settings, file_list):
-    if settings.dataset == 'coco':
-        test_settings = copy.copy(settings)
-        test_settings.data_dir = os.path.join(settings.data_dir, "coco_val2014")
-        return _reader_creator(test_settings, file_list, 'test', False)
-    elif settings.dataset == 'pascalvoc':
-        return _reader_creator(settings, file_list, 'test', False)
-
-
-def infer(settings, file_list):
-    return _reader_creator(settings, file_list, 'infer', False)
diff --git a/fluid/object_detection/coco_train.py b/fluid/object_detection/coco_train.py
deleted file mode 100644
index e27fe0f47b..0000000000
--- a/fluid/object_detection/coco_train.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import paddle.v2 as paddle
-import paddle.fluid as fluid
-import coco_reader as reader
-import load_model as load_model
-from mobilenet_ssd import mobile_net
-from utility import add_arguments, print_arguments
-import os
-import numpy as np
-import argparse
-import functools
-
-parser = argparse.ArgumentParser(description=__doc__)
-add_arg = functools.partial(add_arguments, argparser=parser)
-# yapf: disable
-add_arg('parallel',    bool,   False,     "Whether use parallel training.")
-add_arg('use_gpu',     bool,   True,     "Whether use GPU.")
-# yapf: disable
-
-
-def train(args,
-          train_file_list,
-          val_file_list,
-          data_args,
-          learning_rate,
-          batch_size,
-          num_passes,
-          model_save_dir='model',
-          init_model_path=None):
-    image_shape = [3, data_args.resize_h, data_args.resize_w]
-
-    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
-    gt_box = fluid.layers.data(
-        name='gt_box', shape=[4], dtype='float32', lod_level=1)
-    gt_label = fluid.layers.data(
-        name='gt_label', shape=[1], dtype='int32', lod_level=1)
-    difficult = fluid.layers.data(
-        name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
-
-    if args.parallel:
-        places = fluid.layers.get_places()
-        pd = fluid.layers.ParallelDo(places)
-        with pd.do():
-            image_ = pd.read_input(image)
-            gt_box_ = pd.read_input(gt_box)
-            gt_label_ = pd.read_input(gt_label)
-            difficult_ = pd.read_input(difficult)
-            locs, confs, box, box_var = mobile_net(image_, image_shape)
-            loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_,
-                                         box, box_var)
-            pd.write_output(loss)
-            pd.write_output(locs)
-            pd.write_output(confs)
-            pd.write_output(box)
-            pd.write_output(box_var)
-
-        loss, locs, confs, box, box_var = pd()
-        loss = fluid.layers.reduce_sum(loss)
-    else:
-        locs, confs, box, box_var = mobile_net(image, image_shape)
-        nmsed_out = fluid.layers.detection_output(
-            locs, confs, box, box_var, nms_threshold=0.45)
-        loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label,
-                                     box, box_var)
-        loss = fluid.layers.reduce_sum(loss)
-
-    test_program = fluid.default_main_program().clone(for_test=True)
-    with fluid.program_guard(test_program):
-        nmsed_out = fluid.layers.detection_output(
-            locs, confs, box, box_var, nms_threshold=0.45)
-        map_eval = fluid.evaluator.DetectionMAP(
-            nmsed_out,
-            gt_label,
-            gt_box,
-            difficult,
-            21,
-            overlap_threshold=0.5,
-            evaluate_difficult=False,
-            ap_version='11point')
-
-    boundaries = [40000, 60000]
-    values = [0.001, 0.0005, 0.00025]
-    optimizer = fluid.optimizer.RMSProp(
-        learning_rate=fluid.layers.piecewise_decay(boundaries, values),
-        regularization=fluid.regularizer.L2Decay(0.00005), )
-
-    optimizer.minimize(loss)
-
-    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
-    exe = fluid.Executor(place)
-    exe.run(fluid.default_startup_program())
-
-    load_model.load_and_set_vars(place)
-    #load_model.load_paddlev1_vars(place)
-    train_reader = paddle.batch(
-        reader.train(data_args, train_file_list), batch_size=batch_size)
-    test_reader = paddle.batch(
-        reader.test(data_args, val_file_list), batch_size=batch_size)
-    feeder = fluid.DataFeeder(
-        place=place, feed_list=[image, gt_box, gt_label, difficult])
-
-    #print 'test_program ', test_program
-    def test(pass_id):
-        _, accum_map = map_eval.get_map_var()
-        map_eval.reset(exe)
-        test_map = None
-        for _, data in enumerate(test_reader()):
-            test_map = exe.run(test_program,
-                               feed=feeder.feed(data),
-                               fetch_list=[accum_map])
-        print("Test {0}, map {1}".format(pass_id, test_map[0]))
-
-    #print 'main_program ', fluid.default_main_program()
-    for pass_id in range(num_passes):
-        for batch_id, data in enumerate(train_reader()):
-            loss_v = exe.run(fluid.default_main_program(),
-                             feed=feeder.feed(data),
-                             fetch_list=[loss])
-            if batch_id % 20 == 0:
-                print("Pass {0}, batch {1}, loss {2}"
-                      .format(pass_id, batch_id, loss_v[0]))
-        test(pass_id)
-
-        if pass_id % 10 == 0:
-            model_path = os.path.join(model_save_dir, str(pass_id))
-            print 'save models to %s' % (model_path)
-            fluid.io.save_inference_model(model_path, ['image'], [nmsed_out],
-                                          exe)
-
-
-if __name__ == '__main__':
-    args = parser.parse_args()
-    print_arguments(args)
-    data_args = reader.Settings(
-        dataset='coco', # coco or pascalvoc
-        data_dir='./data/coco',
-        label_file='label_list',
-        apply_distort=True,
-        apply_expand=True,
-        resize_h=300,
-        resize_w=300,
-        mean_value=[127.5, 127.5, 127.5])
-    train(args,
-        train_file_list='./data/coco/annotations/instances_train2014.json',
-        val_file_list='./data/coco/annotations/instances_val2014.json',
-        data_args=data_args,
-        learning_rate=0.001,
-        batch_size=1,
-        num_passes=1)
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 6a6beb6e50..68b566b84e 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -19,16 +19,23 @@
 import numpy as np
 import xml.etree.ElementTree
 import os
+import copy
+
+# cocoapi 
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
 
 
 class Settings(object):
-    def __init__(self, data_dir, label_file, resize_h, resize_w, mean_value,
-                 apply_distort, apply_expand):
+    def __init__(self, dataset, data_dir, label_file, resize_h, resize_w,
+                 mean_value, apply_distort, apply_expand):
+        self._dataset = dataset
         self._data_dir = data_dir
-        self._label_list = []
-        label_fpath = os.path.join(data_dir, label_file)
-        for line in open(label_fpath):
-            self._label_list.append(line.strip())
+        if dataset == "pascalvoc":
+            self._label_list = []
+            label_fpath = os.path.join(data_dir, label_file)
+            for line in open(label_fpath):
+                self._label_list.append(line.strip())
 
         self._apply_distort = apply_distort
         self._apply_expand = apply_expand
@@ -47,6 +54,10 @@ def __init__(self, data_dir, label_file, resize_h, resize_w, mean_value,
         self._brightness_prob = 0.5
         self._brightness_delta = 0.125
 
+    @property
+    def dataset(self):
+        return self._dataset
+
     @property
     def apply_distort(self):
         return self._apply_expand
@@ -59,6 +70,10 @@ def apply_distort(self):
     def data_dir(self):
         return self._data_dir
 
+    @data_dir.setter
+    def data_dir(self, data_dir):
+        self._data_dir = data_dir
+
     @property
     def label_list(self):
         return self._label_list
@@ -78,23 +93,54 @@ def img_mean(self):
 
 def _reader_creator(settings, file_list, mode, shuffle):
     def reader():
-        with open(file_list) as flist:
+        if settings.dataset == 'coco':
+            coco = COCO(file_list)
+            image_ids = coco.getImgIds()
+            images = coco.loadImgs(image_ids)
+        elif settings.dataset == 'pascalvoc':
+            flist = open(file_list)
             lines = [line.strip() for line in flist]
-            if shuffle:
-                random.shuffle(lines)
-            for line in lines:
-                if mode == 'train' or mode == 'test':
-                    img_path, label_path = line.split()
-                    img_path = os.path.join(settings.data_dir, img_path)
-                    label_path = os.path.join(settings.data_dir, label_path)
-                elif mode == 'infer':
-                    img_path = os.path.join(settings.data_dir, line)
-
-                img = Image.open(img_path)
-                img_width, img_height = img.size
-
-                # layout: label | xmin | ymin | xmax | ymax | difficult
-                if mode == 'train' or mode == 'test':
+        if shuffle:
+            random.shuffle(images)
+
+        for image in images:
+            if settings.dataset == 'coco':
+                image_name = image['file_name']
+                image_path = os.path.join(settings.data_dir, image_name)
+            elif settings.dataset == 'pascalvoc':
+                flist = open(file_list)
+                lines = [line.strip() for line in flist]
+
+            img = Image.open(image_path)
+            img_width, img_height = img.size
+
+            if mode == 'train' or mode == 'test':
+                if settings.dataset == 'coco':
+                    # layout: category_id | xmin | ymin | xmax | ymax | iscrowd | origin_coco_bbox | segmentation | area | image_id | annotation_id
+                    bbox_labels = []
+                    annIds = coco.getAnnIds(imgIds=image['id'])
+                    anns = coco.loadAnns(annIds)
+                    for ann in anns:
+                        bbox_sample = []
+                        # start from 1
+                        bbox_sample.append(ann['category_id'])
+                        bbox = ann['bbox']
+                        xmin, ymin, w, h = bbox
+                        xmax = xmin + w
+                        ymax = ymin + h
+                        bbox_sample.append(float(xmin) / img_width)
+                        bbox_sample.append(float(ymin) / img_height)
+                        bbox_sample.append(float(xmax) / img_width)
+                        bbox_sample.append(float(ymax) / img_height)
+                        bbox_sample.append(ann['iscrowd'])
+                        bbox_sample.append(ann['bbox'])
+                        bbox_sample.append(ann['segmentation'])
+                        bbox_sample.append(ann['area'])
+                        bbox_sample.append(ann['image_id'])
+                        bbox_sample.append(ann['id'])
+                        bbox_labels.append(bbox_sample)
+                elif settings.dataset == 'pascalvoc':
+                    # layout: label | xmin | ymin | xmax | ymax | difficult
                     bbox_labels = []
                     root = xml.etree.ElementTree.parse(label_path).getroot()
                     for object in root.findall('object'):
@@ -117,91 +163,94 @@ def reader():
                         bbox_sample.append(difficult)
                         bbox_labels.append(bbox_sample)
 
-                    sample_labels = bbox_labels
-                    if mode == 'train':
-                        if settings._apply_distort:
-                            img = image_util.distort_image(img, settings)
-                        if settings._apply_expand:
-                            img, bbox_labels = image_util.expand_image(
-                                img, bbox_labels, img_width, img_height,
-                                settings)
-                        batch_sampler = []
-                        # hard-code here
-                        batch_sampler.append(
-                            image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9,
-                                               0.0))
-                        batch_sampler.append(
-                            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0,
-                                               1.0))
-                        """ random crop """
-                        sampled_bbox = image_util.generate_batch_samples(
-                            batch_sampler, bbox_labels, img_width, img_height)
-
-                        img = np.array(img)
-                        if len(sampled_bbox) > 0:
-                            idx = int(random.uniform(0, len(sampled_bbox)))
-                            img, sample_labels = image_util.crop_image(
-                                img, bbox_labels, sampled_bbox[idx], img_width,
-                                img_height)
-
-                        img = Image.fromarray(img)
-                img = img.resize((settings.resize_w, settings.resize_h),
-                                 Image.ANTIALIAS)
-                img = np.array(img)
-
+                sample_labels = bbox_labels
                 if mode == 'train':
-                    mirror = int(random.uniform(0, 2))
-                    if mirror == 1:
-                        img = img[:, ::-1, :]
-                        for i in xrange(len(sample_labels)):
-                            tmp = sample_labels[i][1]
-                            sample_labels[i][1] = 1 - sample_labels[i][3]
-                            sample_labels[i][3] = 1 - tmp
-
-                if len(img.shape) == 3:
-                    img = np.swapaxes(img, 1, 2)
-                    img = np.swapaxes(img, 1, 0)
-
-                img = img[[2, 1, 0], :, :]
-                img = img.astype('float32')
-                img -= settings.img_mean
-                img = img.flatten()
-                img = img * 0.007843
-
-                sample_labels = np.array(sample_labels)
-                if mode == 'train' or mode == 'test':
-                    if mode == 'train' and len(sample_labels) == 0: continue
-                    yield img.astype(
-                        'float32'
-                    ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
-                        'int32'), sample_labels[:, -1].astype('int32')
-                elif mode == 'infer':
-                    yield img.astype('float32')
+                    if settings._apply_distort:
+                        img = image_util.distort_image(img, settings)
+                    if settings._apply_expand:
+                        img, bbox_labels = image_util.expand_image(
+                            img, bbox_labels, img_width, img_height, settings)
+                    batch_sampler = []
+                    # hard-code here
+                    batch_sampler.append(
+                        image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))
+                    batch_sampler.append(
+                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
+                    """ random crop """
+                    sampled_bbox = image_util.generate_batch_samples(
+                        batch_sampler, bbox_labels, img_width, img_height)
+
+                    img = np.array(img)
+                    if len(sampled_bbox) > 0:
+                        idx = int(random.uniform(0, len(sampled_bbox)))
+                        img, sample_labels = image_util.crop_image(
+                            img, bbox_labels, sampled_bbox[idx], img_width,
+                            img_height)
+
+                    img = Image.fromarray(img)
+            img = img.resize((settings.resize_w, settings.resize_h),
+                             Image.ANTIALIAS)
+            img = np.array(img)
+
+            if mode == 'train':
+                mirror = int(random.uniform(0, 2))
+                if mirror == 1:
+                    img = img[:, ::-1, :]
+                    for i in xrange(len(sample_labels)):
+                        tmp = sample_labels[i][1]
+                        sample_labels[i][1] = 1 - sample_labels[i][3]
+                        sample_labels[i][3] = 1 - tmp
+
+            if len(img.shape) == 3:
+                img = np.swapaxes(img, 1, 2)
+                img = np.swapaxes(img, 1, 0)
+
+            img = img[[2, 1, 0], :, :]
+            img = img.astype('float32')
+            img -= settings.img_mean
+            img = img.flatten()
+            img = img * 0.007843
+
+            sample_labels = np.array(sample_labels)
+            if mode == 'train' or mode == 'test':
+                if mode == 'train' and len(sample_labels) == 0: continue
+                yield img.astype(
+                    'float32'
+                ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
+                    'int32'), sample_labels[:, -1].astype('int32')
+            elif mode == 'infer':
+                yield img.astype('float32')
 
     return reader
 
 
 def train(settings, file_list, shuffle=True):
-    return _reader_creator(settings, file_list, 'train', shuffle)
+    if settings.dataset == 'coco':
+        train_settings = copy.copy(settings)
+        train_settings.data_dir = os.path.join(settings.data_dir,
+                                               "coco_train2014")
+        return _reader_creator(train_settings, file_list, 'train', shuffle)
+    elif settings.dataset == 'pascalvoc':
+        return _reader_creator(settings, file_list, 'train', shuffle)
 
 
 def test(settings, file_list):
-    return _reader_creator(settings, file_list, 'test', False)
+    if settings.dataset == 'coco':
+        test_settings = copy.copy(settings)
+        test_settings.data_dir = os.path.join(settings.data_dir, "coco_val2014")
+        return _reader_creator(test_settings, file_list, 'test', False)
+    elif settings.dataset == 'pascalvoc':
+        return _reader_creator(settings, file_list, 'test', False)
 
 
 def infer(settings, file_list):
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index f32aaaebe0..e27fe0f47b 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -1,6 +1,6 @@
 import paddle.v2 as paddle
 import paddle.fluid as fluid
-import reader
+import coco_reader as reader
 import load_model as load_model
 from mobilenet_ssd import mobile_net
 from utility import add_arguments, print_arguments
@@ -12,7 +12,7 @@
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 # yapf: disable
-add_arg('parallel',    bool,   True,     "Whether use parallel training.")
+add_arg('parallel',    bool,   False,     "Whether use parallel training.")
 add_arg('use_gpu',     bool,   True,     "Whether use GPU.")
 # yapf: disable
 
@@ -58,8 +58,8 @@ def train(args,
     else:
         locs, confs, box, box_var = mobile_net(image, image_shape)
         nmsed_out = fluid.layers.detection_output(
-            locs, mbox_confs, box, box_var, nms_threshold=0.45)
-        loss = fluid.layers.ssd_loss(locs, mbox_confs, gt_box, gt_label,
+            locs, confs, box, box_var, nms_threshold=0.45)
+        loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label,
                                      box, box_var)
         loss = fluid.layers.reduce_sum(loss)
 
@@ -131,10 +131,18 @@ def test(pass_id):
     args = parser.parse_args()
     print_arguments(args)
     data_args = reader.Settings(
-        data_dir='./data',
+        dataset='coco', # coco or pascalvoc
+        data_dir='./data/coco',
         label_file='label_list',
         apply_distort=True,
         apply_expand=True,
         resize_h=300,
         resize_w=300,
         mean_value=[127.5, 127.5, 127.5])
+    train(args,
+        train_file_list='./data/coco/annotations/instances_train2014.json',
+        val_file_list='./data/coco/annotations/instances_val2014.json',
+        data_args=data_args,
+        learning_rate=0.001,
+        batch_size=1,
+        num_passes=1)
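
Note on the reader.py changes above: COCO annotations store each box as [x, y, w, h] in absolute pixels, and the new reader converts them to normalized [xmin, ymin, xmax, ymax] before appending the remaining fields. A minimal standalone sketch of that conversion (the helper name and sample numbers are illustrative, not part of the patch):

def coco_bbox_to_corners(bbox, img_width, img_height):
    # COCO bbox is [x, y, w, h] in pixels; return normalized corner form
    xmin, ymin, w, h = bbox
    xmax = xmin + w
    ymax = ymin + h
    return [float(xmin) / img_width, float(ymin) / img_height,
            float(xmax) / img_width, float(ymax) / img_height]

# e.g. a 60x40 box at (30, 50) in a 300x300 image
print(coco_bbox_to_corners([30, 50, 60, 40], 300, 300))  # [0.1, 0.1667, 0.3, 0.3]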

From 1396550d26822f8080bbab6e2786c69fe167aebc Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Wed, 28 Mar 2018 19:42:18 +0800
Subject: [PATCH 05/40] use argparse instead of explicit assignment

---
 fluid/object_detection/mobilenet_ssd.py |  9 +++++++--
 fluid/object_detection/reader.py        | 23 ++++++++++++++++++-----
 fluid/object_detection/train.py         | 15 ++++++++++-----
 3 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/fluid/object_detection/mobilenet_ssd.py b/fluid/object_detection/mobilenet_ssd.py
index 21869647aa..bfe783263e 100644
--- a/fluid/object_detection/mobilenet_ssd.py
+++ b/fluid/object_detection/mobilenet_ssd.py
@@ -76,7 +76,7 @@ def extra_block(input, num_filters1, num_filters2, num_groups, stride, scale):
     return normal_conv
 
 
-def mobile_net(img, img_shape, scale=1.0):
+def mobile_net(data_args, img, img_shape, scale=1.0):
     # 300x300
     tmp = conv_bn(img, 3, int(32 * scale), 2, 1, 3)
     # 150x150
@@ -104,10 +104,15 @@ def mobile_net(img, img_shape, scale=1.0):
     module16 = extra_block(module15, 128, 256, 1, 2, scale)
     # 2x2
     module17 = extra_block(module16, 64, 128, 1, 2, scale)
+
+    if data_args.dataset == 'coco':
+        num_classes = 81
+    elif data_args.dataset == 'pascalvoc':
+        num_classes = 21
     mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
         inputs=[module11, module13, module14, module15, module16, module17],
         image=img,
-        num_classes=21,
+        num_classes=num_classes,
         min_ratio=20,
         max_ratio=90,
         min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 68b566b84e..d71be2d524 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -27,9 +27,10 @@
 
 
 class Settings(object):
-    def __init__(self, dataset, data_dir, label_file, resize_h, resize_w,
+    def __init__(self, dataset, toy, data_dir, label_file, resize_h, resize_w,
                  mean_value, apply_distort, apply_expand):
         self._dataset = dataset
+        self._toy = toy
         self._data_dir = data_dir
         if dataset == "pascalvoc":
             self._label_list = []
@@ -58,6 +59,10 @@ def __init__(self, dataset, data_dir, label_file, resize_h, resize_w,
     def dataset(self):
         return self._dataset
 
+    @property
+    def toy(self):
+        return self._toy
+
     @property
     def apply_distort(self):
         return self._apply_expand
@@ -99,17 +104,25 @@ def reader():
             images = coco.loadImgs(image_ids)
         elif settings.dataset == 'pascalvoc':
             flist = open(file_list)
-            lines = [line.strip() for line in flist]
+            images = [line.strip() for line in flist]
+
+        if settings.toy:
+            images = images[:1] if len(images) > 1 else images
+
         if shuffle:
             random.shuffle(images)
 
-        for image in images:
+        for image in images[:1]:
             if settings.dataset == 'coco':
                 image_name = image['file_name']
                 image_path = os.path.join(settings.data_dir, image_name)
             elif settings.dataset == 'pascalvoc':
-                flist = open(file_list)
-                lines = [line.strip() for line in flist]
+                if mode == 'train' or mode == 'test':
+                    image_path, label_path = image.split()
+                    image_path = os.path.join(settings.data_dir, image_path)
+                    label_path = os.path.join(settings.data_dir, label_path)
+                elif mode == 'infer':
+                    image_path = os.path.join(settings.data_dir, image)
 
             img = Image.open(image_path)
             img_width, img_height = img.size
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index fe810775bd..3724b03b1f 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -12,15 +12,18 @@
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('learning_rate', float, 0.001, "Learning rate.")
-add_arg('batch_size', int, 32, "Minibatch size.")
-add_arg('num_passes', int, 300, "Iteration number.")
+add_arg('batch_size', int, 1, "Minibatch size.")
+add_arg('num_passes', int, 1, "Iteration number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
-add_arg('use_gpu', bool, True, "Whether use GPU.")
+add_arg('use_gpu', bool, False, "Whether use GPU.")
 add_arg('train_file_list', str,
         './data/coco/annotations/instances_train2014.json', "train file list")
 add_arg('val_file_list', str, './data/coco/annotations/instances_val2014.json',
         "vaild file list")
 
+add_arg('is_toy', bool, True,
+        "Is Toy for quick debug, which use only one sample")
+
 
 def train(args,
           train_file_list,
@@ -49,7 +52,8 @@ def train(args,
             gt_box_ = pd.read_input(gt_box)
             gt_label_ = pd.read_input(gt_label)
             difficult_ = pd.read_input(difficult)
-            locs, confs, box, box_var = mobile_net(image_, image_shape)
+            locs, confs, box, box_var = mobile_net(data_args, image_,
+                                                   image_shape)
             loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_, box,
                                          box_var)
             nmsed_out = fluid.layers.detection_output(
@@ -61,7 +65,7 @@ def train(args,
         loss, nmsed_out = pd()
         loss = fluid.layers.mean(loss)
     else:
-        locs, confs, box, box_var = mobile_net(image, image_shape)
+        locs, confs, box, box_var = mobile_net(data_args, image, image_shape)
         nmsed_out = fluid.layers.detection_output(
             locs, confs, box, box_var, nms_threshold=0.45)
         loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
@@ -133,6 +137,7 @@ def test(pass_id):
     print_arguments(args)
     data_args = reader.Settings(
         dataset='coco',  # coco or pascalvoc
+        toy=args.is_toy,
         data_dir='./data/coco',
         label_file='label_list',
         apply_distort=True,
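
Note: the mobilenet_ssd.py change above picks the head's class count from the dataset name, 80 COCO object categories plus background versus 20 VOC categories plus background. A tiny sketch of the same selection as a helper (hypothetical function, not in the patch):

def num_classes_for(dataset):
    # 80 COCO categories + background, 20 VOC categories + background
    if dataset == 'coco':
        return 81
    elif dataset == 'pascalvoc':
        return 21
    raise ValueError('unsupported dataset: %s' % dataset)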

From 33faeb7b6a4285e260b6f859f5f8b9dbd2d917da Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Wed, 28 Mar 2018 20:21:30 +0800
Subject: [PATCH 06/40] fix: iterate over all images instead of a leftover debug slice

---
 fluid/object_detection/reader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index d71be2d524..711a86d758 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -112,7 +112,7 @@ def reader():
         if shuffle:
             random.shuffle(images)
 
-        for image in images[:1]:
+        for image in images:
             if settings.dataset == 'coco':
                 image_name = image['file_name']
                 image_path = os.path.join(settings.data_dir, image_name)

From 73a037f16288009b1e294965d4f16eba7c13df5f Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 29 Mar 2018 22:46:59 +0800
Subject: [PATCH 07/40] fix reader bug for grayscale images in COCO data

---
 fluid/object_detection/image_util.py |  2 +-
 fluid/object_detection/reader.py     |  2 +-
 fluid/object_detection/train.py      | 12 ++++++++----
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/fluid/object_detection/image_util.py b/fluid/object_detection/image_util.py
index e538449aa9..cd4892c3a5 100644
--- a/fluid/object_detection/image_util.py
+++ b/fluid/object_detection/image_util.py
@@ -190,7 +190,7 @@ def random_saturation(img, settings):
 
 def random_hue(img, settings):
     prob = random.uniform(0, 1)
-    if prob < settings._hue_prob:
+    if prob < settings._hue_prob and len(img.shape) == 3:
         delta = random.uniform(-settings._hue_delta, settings._hue_delta)
         img_hsv = np.array(img.convert('HSV'))
         img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 711a86d758..db0d9566f7 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -217,7 +217,7 @@ def reader():
 
             if mode == 'train':
                 mirror = int(random.uniform(0, 2))
-                if mirror == 1:
+                if mirror == 1 and len(img.shape) == 3:
                     img = img[:, ::-1, :]
                     for i in xrange(len(sample_labels)):
                         tmp = sample_labels[i][1]
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 3724b03b1f..4a6867f301 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -15,13 +15,13 @@
 add_arg('batch_size', int, 1, "Minibatch size.")
 add_arg('num_passes', int, 1, "Iteration number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
-add_arg('use_gpu', bool, False, "Whether use GPU.")
+add_arg('use_gpu', bool, True, "Whether use GPU.")
 add_arg('train_file_list', str,
         './data/coco/annotations/instances_train2014.json', "train file list")
 add_arg('val_file_list', str, './data/coco/annotations/instances_val2014.json',
         "vaild file list")
 
-add_arg('is_toy', bool, True,
+add_arg('is_toy', bool, False,
         "Is Toy for quick debug, which use only one sample")
 
 
@@ -73,18 +73,22 @@ def train(args,
         loss = fluid.layers.reduce_sum(loss)
 
     test_program = fluid.default_main_program().clone(for_test=True)
+    if data_args.dataset == 'coco':
+        num_classes = 81
+    elif data_args.dataset == 'pascalvoc':
+        num_classes = 21
     with fluid.program_guard(test_program):
         map_eval = fluid.evaluator.DetectionMAP(
             nmsed_out,
             gt_label,
             gt_box,
             difficult,
-            21,
+            num_classes,
             overlap_threshold=0.5,
             evaluate_difficult=False,
             ap_version='11point')
 
-    boundaries = [40000, 60000]
+    boundaries = [160000, 240000]
     values = [0.001, 0.0005, 0.00025]
     optimizer = fluid.optimizer.RMSProp(
         learning_rate=fluid.layers.piecewise_decay(boundaries, values),
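
Note: the reader's horizontal mirror flips the image along the width axis and must also mirror the normalized x-coordinates of every box, which is what the xmin/xmax swap in the loop does; the added len(img.shape) == 3 guard skips this when the decoded image has no channel axis. A small runnable sketch of the same transform (names and sample values are illustrative):

import numpy as np

def mirror_horizontally(img, sample_labels):
    # img: HWC ndarray; sample_labels: lists of [label, xmin, ymin, xmax, ymax, ...]
    # with coordinates already normalized to 0..1
    if len(img.shape) == 3:            # only color images carry a channel axis
        img = img[:, ::-1, :]          # flip along the width axis
        for box in sample_labels:
            xmin, xmax = box[1], box[3]
            box[1], box[3] = 1.0 - xmax, 1.0 - xmin
    return img, sample_labels

img, labels = mirror_horizontally(
    np.zeros((300, 300, 3)), [[0, 0.1, 0.2, 0.4, 0.6]])
print(labels)                          # [[0, 0.6, 0.2, 0.9, 0.6]]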

From c164349b8e3e166258787917f52000027a3fbddc Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 30 Mar 2018 11:46:32 +0800
Subject: [PATCH 08/40] ready to train on COCO

---
 fluid/object_detection/image_util.py |  2 +-
 fluid/object_detection/reader.py     |  4 +++-
 fluid/object_detection/train.py      | 14 ++++++++------
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/fluid/object_detection/image_util.py b/fluid/object_detection/image_util.py
index cd4892c3a5..e538449aa9 100644
--- a/fluid/object_detection/image_util.py
+++ b/fluid/object_detection/image_util.py
@@ -190,7 +190,7 @@ def random_saturation(img, settings):
 
 def random_hue(img, settings):
     prob = random.uniform(0, 1)
-    if prob < settings._hue_prob and len(img.shape) == 3:
+    if prob < settings._hue_prob:
         delta = random.uniform(-settings._hue_delta, settings._hue_delta)
         img_hsv = np.array(img.convert('HSV'))
         img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index db0d9566f7..4493af910c 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -125,6 +125,8 @@ def reader():
                     image_path = os.path.join(settings.data_dir, image)
 
             img = Image.open(image_path)
+            if img.mode == 'L':
+                img = img.convert('RGB')
             img_width, img_height = img.size
 
             if mode == 'train' or mode == 'test':
@@ -217,7 +219,7 @@ def reader():
 
             if mode == 'train':
                 mirror = int(random.uniform(0, 2))
-                if mirror == 1 and len(img.shape) == 3:
+                if mirror == 1:
                     img = img[:, ::-1, :]
                     for i in xrange(len(sample_labels)):
                         tmp = sample_labels[i][1]
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 4a6867f301..21bf654379 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -12,8 +12,8 @@
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('learning_rate', float, 0.001, "Learning rate.")
-add_arg('batch_size', int, 1, "Minibatch size.")
-add_arg('num_passes', int, 1, "Iteration number.")
+add_arg('batch_size', int, 64, "Minibatch size.")
+add_arg('num_passes', int, 10, "Epoch number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
 add_arg('use_gpu', bool, True, "Whether use GPU.")
 add_arg('train_file_list', str,
@@ -100,8 +100,8 @@ def train(args,
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
 
-    load_model.load_and_set_vars(place)
-    #load_model.load_paddlev1_vars(place)
+    #load_model.load_and_set_vars(place)
+    load_model.load_paddlev1_vars(place)
     train_reader = paddle.batch(
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
@@ -121,12 +121,14 @@ def test(pass_id):
 
     for pass_id in range(num_passes):
         for batch_id, data in enumerate(train_reader()):
+            start_time = time.time()
             loss_v = exe.run(fluid.default_main_program(),
                              feed=feeder.feed(data),
                              fetch_list=[loss])
+            end_time = time.time()
             if batch_id % 20 == 0:
-                print("Pass {0}, batch {1}, loss {2}"
-                      .format(pass_id, batch_id, loss_v[0]))
+                print("Pass {0}, batch {1}, loss {2}, time {3}".format(
+                    pass_id, batch_id, loss_v[0], end_time - start_time))
         test(pass_id)
 
         if pass_id % 10 == 0:
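
Note: instead of guarding channel-axis operations, this patch converts grayscale ('L' mode) COCO images to RGB right after loading, so the later mirror, HWC-to-CHW transpose, BGR reorder, and mean subtraction all see a three-channel array. A minimal illustration (the in-memory image is a stand-in for a real grayscale COCO file):

from PIL import Image
import numpy as np

img = Image.new('L', (300, 300))       # stand-in for a grayscale COCO image
if img.mode == 'L':
    img = img.convert('RGB')           # replicate the channel so HWC ops stay valid
print(np.array(img).shape)             # (300, 300, 3)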

From f4d7765644a8a4c5327f73bb80dffb43bae265b0 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 30 Mar 2018 20:50:45 +0800
Subject: [PATCH 09/40] fix bug in test()

---
 fluid/object_detection/reader.py | 16 +++++++++-------
 fluid/object_detection/train.py  | 16 ++++++++++++----
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 4493af910c..f9c1f4b915 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -19,6 +19,7 @@
 import numpy as np
 import xml.etree.ElementTree
 import os
+import time
 import copy
 
 # cocoapi 
@@ -106,8 +107,9 @@ def reader():
             flist = open(file_list)
             images = [line.strip() for line in flist]
 
-        if settings.toy:
-            images = images[:1] if len(images) > 1 else images
+        if not settings.toy == 0:
+            images = images[:settings.toy] if len(
+                images) > settings.toy else images
 
         if shuffle:
             random.shuffle(images)
@@ -148,11 +150,11 @@ def reader():
                         bbox_sample.append(float(xmax) / img_width)
                         bbox_sample.append(float(ymax) / img_height)
                         bbox_sample.append(ann['iscrowd'])
-                        bbox_sample.append(ann['bbox'])
-                        bbox_sample.append(ann['segmentation'])
-                        bbox_sample.append(ann['area'])
-                        bbox_sample.append(ann['image_id'])
-                        bbox_sample.append(ann['id'])
+                        #bbox_sample.append(ann['bbox'])
+                        #bbox_sample.append(ann['segmentation'])
+                        #bbox_sample.append(ann['area'])
+                        #bbox_sample.append(ann['image_id'])
+                        #bbox_sample.append(ann['id'])
                         bbox_labels.append(bbox_sample)
                 elif settings.dataset == 'pascalvoc':
                     # layout: label | xmin | ymin | xmax | ymax | difficult
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 21bf654379..28c43fb167 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -5,6 +5,7 @@
 from mobilenet_ssd import mobile_net
 from utility import add_arguments, print_arguments
 import os
+import time
 import numpy as np
 import argparse
 import functools
@@ -13,7 +14,7 @@
 add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('learning_rate', float, 0.001, "Learning rate.")
 add_arg('batch_size', int, 64, "Minibatch size.")
-add_arg('num_passes', int, 10, "Epoch number.")
+add_arg('num_passes', int, 20, "Epoch number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
 add_arg('use_gpu', bool, True, "Whether use GPU.")
 add_arg('train_file_list', str,
@@ -21,8 +22,10 @@
 add_arg('val_file_list', str, './data/coco/annotations/instances_val2014.json',
         "vaild file list")
 
-add_arg('is_toy', bool, False,
-        "Is Toy for quick debug, which use only one sample")
+add_arg(
+    'is_toy', int, 0,
+    "Is Toy for quick debug, 0 means using all data, while n means using only n sample"
+)
 
 
 def train(args,
@@ -120,15 +123,20 @@ def test(pass_id):
         print("Test {0}, map {1}".format(pass_id, test_map[0]))
 
     for pass_id in range(num_passes):
+        start_time = time.time()
+        prev_start_time = start_time
+        end_time = 0
         for batch_id, data in enumerate(train_reader()):
+            prev_start_time = start_time
             start_time = time.time()
+            #print("Batch {} start at {:.2f}".format(batch_id, start_time))
             loss_v = exe.run(fluid.default_main_program(),
                              feed=feeder.feed(data),
                              fetch_list=[loss])
             end_time = time.time()
             if batch_id % 20 == 0:
                 print("Pass {0}, batch {1}, loss {2}, time {3}".format(
-                    pass_id, batch_id, loss_v[0], end_time - start_time))
+                    pass_id, batch_id, loss_v[0], start_time - prev_start_time))
         test(pass_id)
 
         if pass_id % 10 == 0:
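
Note: the timing added above prints the interval between consecutive batch start times (start_time - prev_start_time) rather than only the executor run, so the reported number also covers data reading and feeding. A stripped-down, runnable sketch of the same pattern (the step function and batch list are placeholders, not from the patch):

import time

def run_train_step(data):              # placeholder for the real executor call
    time.sleep(0.01)

train_batches = [None] * 100           # placeholder for the batched reader

start_time = time.time()
prev_start_time = start_time
for batch_id, data in enumerate(train_batches):
    prev_start_time = start_time
    start_time = time.time()
    run_train_step(data)
    if batch_id % 20 == 0:
        # start-to-start interval: also includes reader and feed overhead
        print('batch {}, time {:.3f}s'.format(batch_id, start_time - prev_start_time))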

From 9fd6d88266043d63b096b0550cc362fa65965615 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 30 Mar 2018 23:44:14 +0800
Subject: [PATCH 10/40] fix bug in test()

---
 fluid/object_detection/reader.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index f9c1f4b915..2be7767e69 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -241,6 +241,7 @@ def reader():
             sample_labels = np.array(sample_labels)
             if mode == 'train' or mode == 'test':
                 if mode == 'train' and len(sample_labels) == 0: continue
+                if mode == 'test' and len(sample_labels) == 0: continue
                 yield img.astype(
                     'float32'
                 ), sample_labels[:, 1:5], sample_labels[:, 0].astype(

From dde4fcda068247c9f84d7160477e2a33d8e7f3da Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Mon, 2 Apr 2018 16:39:15 +0800
Subject: [PATCH 11/40] change coco dataset to coco2017 dataset

---
 fluid/object_detection/reader.py | 10 +++++---
 fluid/object_detection/train.py  | 42 ++++++++++++++++++++++----------
 2 files changed, 35 insertions(+), 17 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 2be7767e69..8f5ae74fde 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -110,6 +110,8 @@ def reader():
         if not settings.toy == 0:
             images = images[:settings.toy] if len(
                 images) > settings.toy else images
+        print("{} on {} with {} images".format(mode, settings.dataset,
+                                               len(images)))
 
         if shuffle:
             random.shuffle(images)
@@ -228,10 +230,11 @@ def reader():
                         sample_labels[i][1] = 1 - sample_labels[i][3]
                         sample_labels[i][3] = 1 - tmp
 
+            # HWC to CHW
             if len(img.shape) == 3:
                 img = np.swapaxes(img, 1, 2)
                 img = np.swapaxes(img, 1, 0)
-
+            # RBG to BGR
             img = img[[2, 1, 0], :, :]
             img = img.astype('float32')
             img -= settings.img_mean
@@ -255,8 +258,7 @@ def reader():
 def train(settings, file_list, shuffle=True):
     if settings.dataset == 'coco':
         train_settings = copy.copy(settings)
-        train_settings.data_dir = os.path.join(settings.data_dir,
-                                               "coco_train2014")
+        train_settings.data_dir = os.path.join(settings.data_dir, "train2017")
         return _reader_creator(train_settings, file_list, 'train', shuffle)
     elif settings.dataset == 'pascalvoc':
         return _reader_creator(settings, file_list, 'train', shuffle)
@@ -265,7 +267,7 @@ def train(settings, file_list, shuffle=True):
 def test(settings, file_list):
     if settings.dataset == 'coco':
         test_settings = copy.copy(settings)
-        test_settings.data_dir = os.path.join(settings.data_dir, "coco_val2014")
+        test_settings.data_dir = os.path.join(settings.data_dir, "val2017")
         return _reader_creator(test_settings, file_list, 'test', False)
     elif settings.dataset == 'pascalvoc':
         return _reader_creator(settings, file_list, 'test', False)
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 28c43fb167..22e99fe3da 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -18,14 +18,29 @@
 add_arg('parallel', bool, True, "Whether use parallel training.")
 add_arg('use_gpu', bool, True, "Whether use GPU.")
 add_arg('train_file_list', str,
-        './data/coco/annotations/instances_train2014.json', "train file list")
-add_arg('val_file_list', str, './data/coco/annotations/instances_val2014.json',
+        './data/COCO17/annotations/stuff_train2017.json', "train file list")
+add_arg('val_file_list', str, './data/COCO17/annotations/stuff_val2017.json',
         "vaild file list")
+add_arg('model_save_dir', str, 'model_coco_pretrain', "where to save model")
 
+add_arg('dataset', str, 'coco', "coco or pascalvoc")
 add_arg(
     'is_toy', int, 0,
     "Is Toy for quick debug, 0 means using all data, while n means using only n sample"
 )
+add_arg('data_dir', str, './data/COCO17', "Root path of data")
+add_arg('label_file', str, 'label_list',
+        "Lable file which lists all label name")
+add_arg('apply_distort', bool, True, "Whether apply distort")
+add_arg('apply_expand', bool, True, "Whether appley expand")
+add_arg('resize_h', int, 300, "resize image size")
+add_arg('resize_w', int, 300, "resize image size")
+add_arg('mean_value_B', float, 127.5,
+        "mean value which will be subtracted")  #123.68
+add_arg('mean_value_G', float, 127.5,
+        "mean value which will be subtracted")  #116.78
+add_arg('mean_value_R', float, 127.5,
+        "mean value which will be subtracted")  #103.94
 
 
 def train(args,
@@ -103,8 +118,8 @@ def train(args,
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
 
-    #load_model.load_and_set_vars(place)
-    load_model.load_paddlev1_vars(place)
+    load_model.load_and_set_vars(place)
+    #load_model.load_paddlev1_vars(place)
     train_reader = paddle.batch(
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
@@ -150,15 +165,15 @@ def test(pass_id):
     args = parser.parse_args()
     print_arguments(args)
     data_args = reader.Settings(
-        dataset='coco',  # coco or pascalvoc
+        dataset=args.dataset,  # coco or pascalvoc
         toy=args.is_toy,
-        data_dir='./data/coco',
-        label_file='label_list',
-        apply_distort=True,
-        apply_expand=True,
-        resize_h=300,
-        resize_w=300,
-        mean_value=[127.5, 127.5, 127.5])
+        data_dir=args.data_dir,
+        label_file=args.label_file,
+        apply_distort=args.apply_distort,
+        apply_expand=args.apply_expand,
+        resize_h=args.resize_h,
+        resize_w=args.resize_w,
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
     train(
         args,
         train_file_list=args.train_file_list,
@@ -166,4 +181,5 @@ def test(pass_id):
         data_args=data_args,
         learning_rate=args.learning_rate,
         batch_size=args.batch_size,
-        num_passes=args.num_passes)
+        num_passes=args.num_passes,
+        model_save_dir=args.model_save_dir)
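
Note: the new mean_value_B/G/R flags feed Settings.img_mean, which the reader subtracts per channel after the image has been transposed to CHW and reordered to BGR; the following 0.007843 factor is roughly 1/127.5, so pixel values land roughly in [-1, 1]. A minimal numpy sketch of those two steps (the random image is a stand-in):

import numpy as np

mean_value = [127.5, 127.5, 127.5]     # B, G, R, matching the command-line flags
img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype('float32')

img = np.random.randint(0, 256, (3, 300, 300)).astype('float32')  # stand-in CHW BGR image
img -= img_mean                        # per-channel mean subtraction (broadcast over HxW)
img *= 0.007843                        # ~1/127.5, scales values roughly into [-1, 1]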

From c4e9e17061469bdef37eb8ee2c6236b43232c72e Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Mon, 2 Apr 2018 17:57:10 +0800
Subject: [PATCH 12/40] change dataset from coco to coco2017

---
 fluid/object_detection/train.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 22e99fe3da..88028bacea 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -18,9 +18,9 @@
 add_arg('parallel', bool, True, "Whether use parallel training.")
 add_arg('use_gpu', bool, True, "Whether use GPU.")
 add_arg('train_file_list', str,
-        './data/COCO17/annotations/stuff_train2017.json', "train file list")
-add_arg('val_file_list', str, './data/COCO17/annotations/stuff_val2017.json',
-        "vaild file list")
+        './data/COCO17/annotations/instances_train2017.json', "train file list")
+add_arg('val_file_list', str,
+        './data/COCO17/annotations/instances_val2017.json', "vaild file list")
 add_arg('model_save_dir', str, 'model_coco_pretrain', "where to save model")
 
 add_arg('dataset', str, 'coco', "coco or pascalvoc")

From 69afb9945f1c50d3c6526503c28c433b731abb04 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Tue, 3 Apr 2018 11:51:30 +0800
Subject: [PATCH 13/40] change learning rate

---
 fluid/object_detection/mobilenet_ssd.py | 15 ++++------
 fluid/object_detection/reader.py        | 14 ++++++++--
 fluid/object_detection/train.py         | 37 +++++++++++++------------
 3 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/fluid/object_detection/mobilenet_ssd.py b/fluid/object_detection/mobilenet_ssd.py
index bfe783263e..de71ad3ff1 100644
--- a/fluid/object_detection/mobilenet_ssd.py
+++ b/fluid/object_detection/mobilenet_ssd.py
@@ -13,7 +13,7 @@ def conv_bn(input,
             num_groups=1,
             act='relu',
             use_cudnn=True):
-    parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
+    parameter_attr = ParamAttr(initializer=MSRA())
     conv = fluid.layers.conv2d(
         input=input,
         num_filters=num_filters,
@@ -25,14 +25,11 @@ def conv_bn(input,
         use_cudnn=use_cudnn,
         param_attr=parameter_attr,
         bias_attr=False)
-    parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
-    bias_attr = ParamAttr(learning_rate=0.2)
-    return fluid.layers.batch_norm(
-        input=conv,
-        act=act,
-        epsilon=0.00001,
-        param_attr=parameter_attr,
-        bias_attr=bias_attr)
+    #parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
+    #bias_attr = ParamAttr(learning_rate=0.2)
+    return fluid.layers.batch_norm(input=conv, act=act, epsilon=0.00001)
+    #param_attr=parameter_attr,
+    #bias_attr=bias_attr)
 
 
 def depthwise_separable(input, num_filters1, num_filters2, num_groups, stride,
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 8f5ae74fde..1bda954139 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -258,7 +258,12 @@ def reader():
 def train(settings, file_list, shuffle=True):
     if settings.dataset == 'coco':
         train_settings = copy.copy(settings)
-        train_settings.data_dir = os.path.join(settings.data_dir, "train2017")
+        if '2014' in file_list:
+            sub_dir = "train2014"
+        elif '2017' in file_list:
+            sub_dir = "train2017"
+        train_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
+        file_list = os.path.join(settings.data_dir, file_list)
         return _reader_creator(train_settings, file_list, 'train', shuffle)
     elif settings.dataset == 'pascalvoc':
         return _reader_creator(settings, file_list, 'train', shuffle)
@@ -267,7 +272,12 @@ def train(settings, file_list, shuffle=True):
 def test(settings, file_list):
     if settings.dataset == 'coco':
         test_settings = copy.copy(settings)
-        test_settings.data_dir = os.path.join(settings.data_dir, "val2017")
+        if '2014' in file_list:
+            sub_dir = "val2014"
+        elif '2017' in file_list:
+            sub_dir = "val2017"
+        test_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
+        file_list = os.path.join(settings.data_dir, file_list)
         return _reader_creator(test_settings, file_list, 'test', False)
     elif settings.dataset == 'pascalvoc':
         return _reader_creator(settings, file_list, 'test', False)
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 88028bacea..b3c4cad60f 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -13,14 +13,15 @@
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('learning_rate', float, 0.001, "Learning rate.")
-add_arg('batch_size', int, 64, "Minibatch size.")
+add_arg('batch_size', int, 32, "Minibatch size.")
 add_arg('num_passes', int, 20, "Epoch number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
 add_arg('use_gpu', bool, True, "Whether use GPU.")
-add_arg('train_file_list', str,
-        './data/COCO17/annotations/instances_train2017.json', "train file list")
-add_arg('val_file_list', str,
-        './data/COCO17/annotations/instances_val2017.json', "vaild file list")
+add_arg('data_dir', str, './data/coco', "Root path of data")
+add_arg('train_file_list', str, 'annotations/instances_train2014.json',
+        "train file list")
+add_arg('val_file_list', str, 'annotations/instances_minival2014.json',
+        "vaild file list")
 add_arg('model_save_dir', str, 'model_coco_pretrain', "where to save model")
 
 add_arg('dataset', str, 'coco', "coco or pascalvoc")
@@ -28,11 +29,10 @@
     'is_toy', int, 0,
     "Is Toy for quick debug, 0 means using all data, while n means using only n sample"
 )
-add_arg('data_dir', str, './data/COCO17', "Root path of data")
 add_arg('label_file', str, 'label_list',
         "Lable file which lists all label name")
-add_arg('apply_distort', bool, True, "Whether apply distort")
-add_arg('apply_expand', bool, True, "Whether appley expand")
+add_arg('apply_distort', bool, False, "Whether apply distort")
+add_arg('apply_expand', bool, False, "Whether appley expand")
 add_arg('resize_h', int, 300, "resize image size")
 add_arg('resize_w', int, 300, "resize image size")
 add_arg('mean_value_B', float, 127.5,
@@ -64,7 +64,7 @@ def train(args,
 
     if args.parallel:
         places = fluid.layers.get_places()
-        pd = fluid.layers.ParallelDo(places)
+        pd = fluid.layers.ParallelDo(places, use_nccl=True)
         with pd.do():
             image_ = pd.read_input(image)
             gt_box_ = pd.read_input(gt_box)
@@ -72,16 +72,19 @@ def train(args,
             difficult_ = pd.read_input(difficult)
             locs, confs, box, box_var = mobile_net(data_args, image_,
                                                    image_shape)
-            loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_, box,
-                                         box_var)
+            loss, w = fluid.layers.ssd_loss(
+                locs, confs, gt_box_, gt_label_, box, box_var, normalize=False)
             nmsed_out = fluid.layers.detection_output(
                 locs, confs, box, box_var, nms_threshold=0.45)
-            loss = fluid.layers.reduce_sum(loss)
+            #loss = fluid.layers.reduce_sum(loss)
             pd.write_output(loss)
+            pd.write_output(w)
             pd.write_output(nmsed_out)
 
-        loss, nmsed_out = pd()
-        loss = fluid.layers.mean(loss)
+        loss, w, nmsed_out = pd()
+        normalizer = fluid.layers.reduce_sum(w)
+        loss = loss / normalizer
+        loss = fluid.layers.reduce_sum(loss)
     else:
         locs, confs, box, box_var = mobile_net(data_args, image, image_shape)
         nmsed_out = fluid.layers.detection_output(
@@ -104,10 +107,10 @@ def train(args,
             num_classes,
             overlap_threshold=0.5,
             evaluate_difficult=False,
-            ap_version='11point')
+            ap_version='integral')
 
-    boundaries = [160000, 240000]
-    values = [0.001, 0.0005, 0.00025]
+    boundaries = [16000, 24000]
+    values = [0.0001, 0.00005, 0.000025]
     optimizer = fluid.optimizer.RMSProp(
         learning_rate=fluid.layers.piecewise_decay(boundaries, values),
         regularization=fluid.regularizer.L2Decay(0.00005), )
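
Note: piecewise_decay holds the learning rate constant between step boundaries; with the values above, training runs at 1e-4 until step 16000, 5e-5 until step 24000, and 2.5e-5 afterwards. A plain-Python sketch of that mapping (illustrative only, not the fluid op):

def piecewise_lr(step, boundaries, values):
    # values has one more entry than boundaries; the rate drops at each boundary
    for boundary, value in zip(boundaries, values):
        if step < boundary:
            return value
    return values[-1]

boundaries = [16000, 24000]
values = [0.0001, 0.00005, 0.000025]
assert piecewise_lr(0, boundaries, values) == 0.0001
assert piecewise_lr(20000, boundaries, values) == 0.00005
assert piecewise_lr(30000, boundaries, values) == 0.000025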

From ed70cbffe5085c02727a30cf3a99574b0ec0e63a Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Tue, 3 Apr 2018 15:13:52 +0800
Subject: [PATCH 14/40] fix bug in gt label (map category id to label index)

---
 fluid/object_detection/reader.py | 43 ++++++++++++++++++++++++++++++--
 fluid/object_detection/train.py  | 17 ++++++-------
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 1bda954139..25edd23949 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -16,6 +16,7 @@
 from paddle.utils.image_util import *
 import random
 from PIL import Image
+from PIL import ImageDraw
 import numpy as np
 import xml.etree.ElementTree
 import os
@@ -103,6 +104,10 @@ def reader():
             coco = COCO(file_list)
             image_ids = coco.getImgIds()
             images = coco.loadImgs(image_ids)
+            category_ids = coco.getCatIds()
+            category_names = [
+                item['name'] for item in coco.loadCats(category_ids)
+            ]
         elif settings.dataset == 'pascalvoc':
             flist = open(file_list)
             images = [line.strip() for line in flist]
@@ -142,7 +147,8 @@ def reader():
                     for ann in anns:
                         bbox_sample = []
                         # start from 1
-                        bbox_sample.append(ann['category_id'])
+                        bbox_sample.append(
+                            float(category_ids.index(ann['category_id'])))
                         bbox = ann['bbox']
                         xmin, ymin, w, h = bbox
                         xmax = xmin + w
@@ -151,7 +157,7 @@ def reader():
                         bbox_sample.append(float(ymin) / img_height)
                         bbox_sample.append(float(xmax) / img_width)
                         bbox_sample.append(float(ymax) / img_height)
-                        bbox_sample.append(ann['iscrowd'])
+                        bbox_sample.append(float(ann['iscrowd']))
                         #bbox_sample.append(ann['bbox'])
                         #bbox_sample.append(ann['segmentation'])
                         #bbox_sample.append(ann['area'])
@@ -230,6 +236,7 @@ def reader():
                         sample_labels[i][1] = 1 - sample_labels[i][3]
                         sample_labels[i][3] = 1 - tmp
 
+            #draw_bounding_box_on_image(img, sample_labels, image_name, category_names, normalized=True)
             # HWC to CHW
             if len(img.shape) == 3:
                 img = np.swapaxes(img, 1, 2)
@@ -255,6 +262,38 @@ def reader():
     return reader
 
 
+def draw_bounding_box_on_image(image,
+                               sample_labels,
+                               image_name,
+                               category_names,
+                               color='red',
+                               thickness=4,
+                               with_text=True,
+                               normalized=True):
+    image = Image.fromarray(image)
+    draw = ImageDraw.Draw(image)
+    im_width, im_height = image.size
+    if not normalized:
+        im_width, im_height = 1, 1
+    for item in sample_labels:
+        label = item[0]
+        category_name = category_names[int(label)]
+        bbox = item[1:5]
+        xmin, ymin, xmax, ymax = bbox
+        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
+                                      ymin * im_height, ymax * im_height)
+        draw.line(
+            [(left, top), (left, bottom), (right, bottom), (right, top),
+             (left, top)],
+            width=thickness,
+            fill=color)
+        #draw.rectangle([xmin, ymin, xmax, ymax], outline=color)
+        if with_text:
+            if image.mode == 'RGB':
+                draw.text((left, top), category_name, (255, 255, 0))
+    image.save(image_name)
+
+
 def train(settings, file_list, shuffle=True):
     if settings.dataset == 'coco':
         train_settings = copy.copy(settings)
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index b3c4cad60f..24125518e6 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -12,7 +12,7 @@
 
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
-add_arg('learning_rate', float, 0.001, "Learning rate.")
+add_arg('learning_rate', float, 0.0001, "Learning rate.")
 add_arg('batch_size', int, 32, "Minibatch size.")
 add_arg('num_passes', int, 20, "Epoch number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
@@ -64,7 +64,7 @@ def train(args,
 
     if args.parallel:
         places = fluid.layers.get_places()
-        pd = fluid.layers.ParallelDo(places, use_nccl=True)
+        pd = fluid.layers.ParallelDo(places)
         with pd.do():
             image_ = pd.read_input(image)
             gt_box_ = pd.read_input(gt_box)
@@ -72,19 +72,16 @@ def train(args,
             difficult_ = pd.read_input(difficult)
             locs, confs, box, box_var = mobile_net(data_args, image_,
                                                    image_shape)
-            loss, w = fluid.layers.ssd_loss(
-                locs, confs, gt_box_, gt_label_, box, box_var, normalize=False)
+            loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_, box,
+                                         box_var)
             nmsed_out = fluid.layers.detection_output(
                 locs, confs, box, box_var, nms_threshold=0.45)
-            #loss = fluid.layers.reduce_sum(loss)
+            loss = fluid.layers.reduce_sum(loss)
             pd.write_output(loss)
-            pd.write_output(w)
             pd.write_output(nmsed_out)
 
-        loss, w, nmsed_out = pd()
-        normalizer = fluid.layers.reduce_sum(w)
-        loss = loss / normalizer
-        loss = fluid.layers.reduce_sum(loss)
+        loss, nmsed_out = pd()
+        loss = fluid.layers.mean(loss)
     else:
         locs, confs, box, box_var = mobile_net(data_args, image, image_shape)
         nmsed_out = fluid.layers.detection_output(

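Note: patch 14 maps COCO's sparse category ids onto dense labels by their position in `coco.getCatIds()`, and casts `iscrowd` to float so every `bbox_sample` entry stays numeric. A small pycocotools sketch of that mapping; the annotation path is an assumption about the local layout:

```python
from pycocotools.coco import COCO

ann_file = 'annotations/instances_val2017.json'  # assumed local path
coco = COCO(ann_file)

category_ids = coco.getCatIds()                  # sparse ids (1..90 with gaps)
category_names = [c['name'] for c in coco.loadCats(category_ids)]

def to_dense_label(category_id):
    # Position in the id list gives a dense 0-based label, which is what
    # reader.py stores at this point (patch 15 shifts it by one for background).
    return float(category_ids.index(category_id))

ann_ids = coco.getAnnIds(imgIds=coco.getImgIds()[:1])
for ann in coco.loadAnns(ann_ids):
    print(to_dense_label(ann['category_id']), float(ann['iscrowd']))
```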
From 027df47c69f4ab4c64a84914ab8121eb66b6e335 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Wed, 4 Apr 2018 19:29:26 +0800
Subject: [PATCH 15/40] fix bug in background label

---
 fluid/object_detection/reader.py |  4 ++--
 fluid/object_detection/train.py  | 14 +++++++-------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 25edd23949..29ac65a500 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -146,9 +146,9 @@ def reader():
                     anns = coco.loadAnns(annIds)
                     for ann in anns:
                         bbox_sample = []
-                        # start from 1
+                        # start from 1, leave 0 to background
                         bbox_sample.append(
-                            float(category_ids.index(ann['category_id'])))
+                            float(category_ids.index(ann['category_id'])) + 1)
                         bbox = ann['bbox']
                         xmin, ymin, w, h = bbox
                         xmax = xmin + w
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 24125518e6..c5f23c0592 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -1,4 +1,4 @@
-import paddle.v2 as paddle
+import paddle
 import paddle.fluid as fluid
 import reader
 import load_model as load_model
@@ -12,7 +12,7 @@
 
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
-add_arg('learning_rate', float, 0.0001, "Learning rate.")
+add_arg('learning_rate', float, 0.001, "Learning rate.")
 add_arg('batch_size', int, 32, "Minibatch size.")
 add_arg('num_passes', int, 20, "Epoch number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
@@ -22,7 +22,7 @@
         "train file list")
 add_arg('val_file_list', str, 'annotations/instances_minival2014.json',
         "vaild file list")
-add_arg('model_save_dir', str, 'model_coco_pretrain', "where to save model")
+add_arg('model_save_dir', str, 'model', "where to save model")
 
 add_arg('dataset', str, 'coco', "coco or pascalvoc")
 add_arg(
@@ -106,8 +106,8 @@ def train(args,
             evaluate_difficult=False,
             ap_version='integral')
 
-    boundaries = [16000, 24000]
-    values = [0.0001, 0.00005, 0.000025]
+    boundaries = [32000, 48000]
+    values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25]
     optimizer = fluid.optimizer.RMSProp(
         learning_rate=fluid.layers.piecewise_decay(boundaries, values),
         regularization=fluid.regularizer.L2Decay(0.00005), )
@@ -118,8 +118,8 @@ def train(args,
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
 
-    load_model.load_and_set_vars(place)
-    #load_model.load_paddlev1_vars(place)
+    #load_model.load_and_set_vars(place)
+    load_model.load_paddlev1_vars(place)
     train_reader = paddle.batch(
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(

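Note: patch 15 reserves label 0 for the background class, so the dense index is shifted by one and `num_classes` counts background (81 for COCO's 80 categories, 21 for PASCAL VOC's 20). A short sketch of the shifted mapping and its inverse; the `category_ids` values are illustrative stand-ins for `coco.getCatIds()`:

```python
category_ids = [1, 2, 3, 4, 5]            # stand-in for coco.getCatIds()

def to_label(category_id):
    # 0 is reserved for background, so real classes start at 1.
    return float(category_ids.index(category_id)) + 1

def to_category_id(label):
    # Inverse mapping, needed when exporting detections back to COCO format.
    return category_ids[int(label) - 1]

assert to_category_id(to_label(3)) == 3
num_classes = len(category_ids) + 1       # +1 for the background class
```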
From 2cf5084b9a53e066338071adc8cbccb24f20fb4f Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Sun, 8 Apr 2018 11:55:36 +0800
Subject: [PATCH 16/40] save model when training finishes

---
 fluid/object_detection/mobilenet_ssd.py |  6 +----
 fluid/object_detection/train.py         | 35 +++++++++++++++----------
 2 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/fluid/object_detection/mobilenet_ssd.py b/fluid/object_detection/mobilenet_ssd.py
index de71ad3ff1..01fa32cfb8 100644
--- a/fluid/object_detection/mobilenet_ssd.py
+++ b/fluid/object_detection/mobilenet_ssd.py
@@ -73,7 +73,7 @@ def extra_block(input, num_filters1, num_filters2, num_groups, stride, scale):
     return normal_conv
 
 
-def mobile_net(data_args, img, img_shape, scale=1.0):
+def mobile_net(num_classes, img, img_shape, scale=1.0):
     # 300x300
     tmp = conv_bn(img, 3, int(32 * scale), 2, 1, 3)
     # 150x150
@@ -102,10 +102,6 @@ def mobile_net(data_args, img, img_shape, scale=1.0):
     # 2x2
     module17 = extra_block(module16, 64, 128, 1, 2, scale)
 
-    if data_args.dataset == 'coco':
-        num_classes = 81
-    elif data_args.dataset == 'pascalvoc':
-        num_classes = 21
     mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
         inputs=[module11, module13, module14, module15, module16, module17],
         image=img,
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index c5f23c0592..1691955d5f 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -14,15 +14,15 @@
 add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('learning_rate', float, 0.001, "Learning rate.")
 add_arg('batch_size', int, 32, "Minibatch size.")
-add_arg('num_passes', int, 20, "Epoch number.")
+add_arg('num_passes', int, 25, "Epoch number.")
 add_arg('parallel', bool, True, "Whether use parallel training.")
 add_arg('use_gpu', bool, True, "Whether use GPU.")
-add_arg('data_dir', str, './data/coco', "Root path of data")
-add_arg('train_file_list', str, 'annotations/instances_train2014.json',
+add_arg('data_dir', str, './data/COCO17', "Root path of data")
+add_arg('train_file_list', str, 'annotations/instances_train2017.json',
         "train file list")
-add_arg('val_file_list', str, 'annotations/instances_minival2014.json',
+add_arg('val_file_list', str, 'annotations/instances_val2017.json',
         "vaild file list")
-add_arg('model_save_dir', str, 'model', "where to save model")
+add_arg('model_save_dir', str, 'model_COCO17', "where to save model")
 
 add_arg('dataset', str, 'coco', "coco or pascalvoc")
 add_arg(
@@ -31,7 +31,7 @@
 )
 add_arg('label_file', str, 'label_list',
         "Lable file which lists all label name")
-add_arg('apply_distort', bool, False, "Whether apply distort")
+add_arg('apply_distort', bool, True, "Whether apply distort")
 add_arg('apply_expand', bool, False, "Whether appley expand")
 add_arg('resize_h', int, 300, "resize image size")
 add_arg('resize_w', int, 300, "resize image size")
@@ -53,6 +53,10 @@ def train(args,
           model_save_dir='model',
           init_model_path=None):
     image_shape = [3, data_args.resize_h, data_args.resize_w]
+    if data_args.dataset == 'coco':
+        num_classes = 81
+    elif data_args.dataset == 'pascalvoc':
+        num_classes = 21
 
     image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
     gt_box = fluid.layers.data(
@@ -70,7 +74,7 @@ def train(args,
             gt_box_ = pd.read_input(gt_box)
             gt_label_ = pd.read_input(gt_label)
             difficult_ = pd.read_input(difficult)
-            locs, confs, box, box_var = mobile_net(data_args, image_,
+            locs, confs, box, box_var = mobile_net(num_classes, image_,
                                                    image_shape)
             loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_, box,
                                          box_var)
@@ -83,7 +87,7 @@ def train(args,
         loss, nmsed_out = pd()
         loss = fluid.layers.mean(loss)
     else:
-        locs, confs, box, box_var = mobile_net(data_args, image, image_shape)
+        locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
         nmsed_out = fluid.layers.detection_output(
             locs, confs, box, box_var, nms_threshold=0.45)
         loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
@@ -91,10 +95,6 @@ def train(args,
         loss = fluid.layers.reduce_sum(loss)
 
     test_program = fluid.default_main_program().clone(for_test=True)
-    if data_args.dataset == 'coco':
-        num_classes = 81
-    elif data_args.dataset == 'pascalvoc':
-        num_classes = 21
     with fluid.program_guard(test_program):
         map_eval = fluid.evaluator.DetectionMAP(
             nmsed_out,
@@ -106,7 +106,14 @@ def train(args,
             evaluate_difficult=False,
             ap_version='integral')
 
-    boundaries = [32000, 48000]
+    if data_args.dataset == 'coco':
+        # learning rate decay in 12, 19 pass, respectively
+        if '2014' in train_file_list:
+            boundaries = [82783 / batch_size * 12, 82783 / batch_size * 19]
+        elif '2017' in train_file_list:
+            boundaries = [118287 / batch_size * 12, 118287 / batch_size * 19]
+    elif data_args.dataset == 'pascalvoc':
+        boundaries = [40000, 60000]
     values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25]
     optimizer = fluid.optimizer.RMSProp(
         learning_rate=fluid.layers.piecewise_decay(boundaries, values),
@@ -154,7 +161,7 @@ def test(pass_id):
                     pass_id, batch_id, loss_v[0], start_time - prev_start_time))
         test(pass_id)
 
-        if pass_id % 10 == 0:
+        if pass_id % 10 == 0 or pass_id == num_passes - 1:
             model_path = os.path.join(model_save_dir, str(pass_id))
             print 'save models to %s' % (model_path)
             fluid.io.save_inference_model(model_path, ['image'], [nmsed_out],

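Note: patch 16 derives the piecewise-decay boundaries from the number of training images, the batch size, and the passes (12 and 19) at which the learning rate should drop. A sketch of that arithmetic; Python 3 floor division stands in for the Python 2 `/` used in the patch:

```python
def decay_boundaries(num_train_images, batch_size, decay_passes=(12, 19)):
    iters_per_pass = num_train_images // batch_size
    return [iters_per_pass * p for p in decay_passes]

# Image counts are the ones hard-coded in the patch.
print(decay_boundaries(82783, 32))    # train2014 -> [31032, 49134]
print(decay_boundaries(118287, 32))   # train2017 -> [44352, 70224]
```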
From 4a059e7207254c45889cc4d22eda403f7bf39704 Mon Sep 17 00:00:00 2001
From: Bu <buxingyuan@baidu.com>
Date: Sun, 8 Apr 2018 21:58:50 +0800
Subject: [PATCH 17/40] use COCO mAP evaluation

---
 fluid/object_detection/image_util.py | 37 ++++++++++-
 fluid/object_detection/reader.py     | 67 ++++++-------------
 fluid/object_detection/train.py      | 97 ++++++++++++++++++++++------
 3 files changed, 135 insertions(+), 66 deletions(-)

diff --git a/fluid/object_detection/image_util.py b/fluid/object_detection/image_util.py
index e538449aa9..db8cf6bfa0 100644
--- a/fluid/object_detection/image_util.py
+++ b/fluid/object_detection/image_util.py
@@ -1,4 +1,4 @@
-from PIL import Image, ImageEnhance
+from PIL import Image, ImageEnhance, ImageDraw
 import numpy as np
 import random
 import math
@@ -145,7 +145,8 @@ def transform_labels(bbox_labels, sample_bbox):
             sample_label.append(float(proj_bbox.ymin))
             sample_label.append(float(proj_bbox.xmax))
             sample_label.append(float(proj_bbox.ymax))
-            sample_label.append(bbox_labels[i][5])
+            #sample_label.append(bbox_labels[i][5])
+            sample_label = sample_label + bbox_labels[i][5:]
             sample_labels.append(sample_label)
     return sample_labels
 
@@ -233,3 +234,35 @@ def expand_image(img, bbox_labels, img_width, img_height, settings):
             bbox_labels = transform_labels(bbox_labels, expand_bbox)
             return expand_img, bbox_labels
     return img, bbox_labels
+
+
+def draw_bounding_box_on_image(image,
+                               sample_labels,
+                               image_name,
+                               category_names,
+                               color='red',
+                               thickness=4,
+                               with_text=True,
+                               normalized=True):
+    image = Image.fromarray(image)
+    draw = ImageDraw.Draw(image)
+    im_width, im_height = image.size
+    if not normalized:
+        im_width, im_height = 1, 1
+    for item in sample_labels:
+        label = item[0]
+        category_name = category_names[int(label)]
+        bbox = item[1:5]
+        xmin, ymin, xmax, ymax = bbox
+        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
+                                      ymin * im_height, ymax * im_height)
+        draw.line(
+            [(left, top), (left, bottom), (right, bottom), (right, top),
+             (left, top)],
+            width=thickness,
+            fill=color)
+        #draw.rectangle([xmin, ymin, xmax, ymax], outline=color)
+        if with_text:
+            if image.mode == 'RGB':
+                draw.text((left, top), category_name, (255, 255, 0))
+    image.save(image_name)
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 29ac65a500..55c44e4ef3 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -16,7 +16,6 @@
 from paddle.utils.image_util import *
 import random
 from PIL import Image
-from PIL import ImageDraw
 import numpy as np
 import xml.etree.ElementTree
 import os
@@ -137,10 +136,11 @@ def reader():
             if img.mode == 'L':
                 img = img.convert('RGB')
             img_width, img_height = img.size
+            img_id = image['id']
 
             if mode == 'train' or mode == 'test':
                 if settings.dataset == 'coco':
-                    # layout: category_id | xmin | ymin | xmax | ymax | iscrowd | origin_coco_bbox | segmentation | area | image_id | annotation_id
+                    # layout: label | xmin | ymin | xmax | ymax | iscrowd | area | image_id | category_id
                     bbox_labels = []
                     annIds = coco.getAnnIds(imgIds=image['id'])
                     anns = coco.loadAnns(annIds)
@@ -148,7 +148,8 @@ def reader():
                         bbox_sample = []
                         # start from 1, leave 0 to background
                         bbox_sample.append(
-                            float(category_ids.index(ann['category_id'])) + 1)
+                            float(ann['category_id']))
+                            #float(category_ids.index(ann['category_id'])) + 1)
                         bbox = ann['bbox']
                         xmin, ymin, w, h = bbox
                         xmax = xmin + w
@@ -158,11 +159,12 @@ def reader():
                         bbox_sample.append(float(xmax) / img_width)
                         bbox_sample.append(float(ymax) / img_height)
                         bbox_sample.append(float(ann['iscrowd']))
-                        #bbox_sample.append(ann['bbox'])
-                        #bbox_sample.append(ann['segmentation'])
                         #bbox_sample.append(ann['area'])
                         #bbox_sample.append(ann['image_id'])
+                        #bbox_sample.append(ann['category_id'])
                         #bbox_sample.append(ann['id'])
+                        #bbox_sample.append(ann['bbox'])
+                        #bbox_sample.append(ann['segmentation'])
                         bbox_labels.append(bbox_sample)
                 elif settings.dataset == 'pascalvoc':
                     # layout: label | xmin | ymin | xmax | ymax | difficult
@@ -236,7 +238,6 @@ def reader():
                         sample_labels[i][1] = 1 - sample_labels[i][3]
                         sample_labels[i][3] = 1 - tmp
 
-            #draw_bounding_box_on_image(img, sample_labels, image_name, category_names, normalized=True)
             # HWC to CHW
             if len(img.shape) == 3:
                 img = np.swapaxes(img, 1, 2)
@@ -248,52 +249,26 @@ def reader():
             img = img.flatten()
             img = img * 0.007843
 
-            sample_labels = np.array(sample_labels)
             if mode == 'train' or mode == 'test':
-                if mode == 'train' and len(sample_labels) == 0: continue
-                if mode == 'test' and len(sample_labels) == 0: continue
-                yield img.astype(
-                    'float32'
-                ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
-                    'int32'), sample_labels[:, -1].astype('int32')
+                sample_labels = np.array(sample_labels)
+                if len(sample_labels) == 0:
+                    continue
+                if settings.dataset == 'coco':
+                    yield img.astype('float32'), \
+                        sample_labels[:, 1:5], \
+                        sample_labels[:, 0].astype('int32'), \
+                        sample_labels[:, 5].astype('int32'), \
+                        [img_id, img_width, img_height]
+                elif settings.dataset == 'pascalvoc':
+                    yield img.astype(
+                        'float32'
+                    ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
+                        'int32'), sample_labels[:, -1].astype('int32')
             elif mode == 'infer':
                 yield img.astype('float32')
 
     return reader
 
-
-def draw_bounding_box_on_image(image,
-                               sample_labels,
-                               image_name,
-                               category_names,
-                               color='red',
-                               thickness=4,
-                               with_text=True,
-                               normalized=True):
-    image = Image.fromarray(image)
-    draw = ImageDraw.Draw(image)
-    im_width, im_height = image.size
-    if not normalized:
-        im_width, im_height = 1, 1
-    for item in sample_labels:
-        label = item[0]
-        category_name = category_names[int(label)]
-        bbox = item[1:5]
-        xmin, ymin, xmax, ymax = bbox
-        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
-                                      ymin * im_height, ymax * im_height)
-        draw.line(
-            [(left, top), (left, bottom), (right, bottom), (right, top),
-             (left, top)],
-            width=thickness,
-            fill=color)
-        #draw.rectangle([xmin, ymin, xmax, ymax], outline=color)
-        if with_text:
-            if image.mode == 'RGB':
-                draw.text((left, top), category_name, (255, 255, 0))
-    image.save(image_name)
-
-
 def train(settings, file_list, shuffle=True):
     if settings.dataset == 'coco':
         train_settings = copy.copy(settings)
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 1691955d5f..1a39aaf954 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -1,6 +1,7 @@
 import paddle
 import paddle.fluid as fluid
 import reader
+import image_util
 import load_model as load_model
 from mobilenet_ssd import mobile_net
 from utility import add_arguments, print_arguments
@@ -14,9 +15,10 @@
 add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('learning_rate', float, 0.001, "Learning rate.")
 add_arg('batch_size', int, 32, "Minibatch size.")
-add_arg('num_passes', int, 25, "Epoch number.")
-add_arg('parallel', bool, True, "Whether use parallel training.")
-add_arg('use_gpu', bool, True, "Whether use GPU.")
+add_arg('num_passes', int, 0, "Epoch number.")
+add_arg('nms_threshold', float, 0.5, "NMS threshold.")
+add_arg('parallel', bool, False, "Whether use parallel training.")
+add_arg('use_gpu', bool, False, "Whether use GPU.")
 add_arg('data_dir', str, './data/COCO17', "Root path of data")
 add_arg('train_file_list', str, 'annotations/instances_train2017.json',
         "train file list")
@@ -26,7 +28,7 @@
 
 add_arg('dataset', str, 'coco', "coco or pascalvoc")
 add_arg(
-    'is_toy', int, 0,
+    'is_toy', int, 4,
     "Is Toy for quick debug, 0 means using all data, while n means using only n sample"
 )
 add_arg('label_file', str, 'label_list',
@@ -54,7 +56,7 @@ def train(args,
           init_model_path=None):
     image_shape = [3, data_args.resize_h, data_args.resize_w]
     if data_args.dataset == 'coco':
-        num_classes = 81
+        num_classes = 91
     elif data_args.dataset == 'pascalvoc':
         num_classes = 21
 
@@ -65,6 +67,10 @@ def train(args,
         name='gt_label', shape=[1], dtype='int32', lod_level=1)
     difficult = fluid.layers.data(
         name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
+    gt_iscrowd = fluid.layers.data(
+        name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)
+    gt_image_info = fluid.layers.data(
+        name='gt_image_id', shape=[3], dtype='int32', lod_level=1)
 
     if args.parallel:
         places = fluid.layers.get_places()
@@ -79,7 +85,7 @@ def train(args,
             loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_, box,
                                          box_var)
             nmsed_out = fluid.layers.detection_output(
-                locs, confs, box, box_var, nms_threshold=0.45)
+                locs, confs, box, box_var, nms_threshold=args.nms_threshold)
             loss = fluid.layers.reduce_sum(loss)
             pd.write_output(loss)
             pd.write_output(nmsed_out)
@@ -89,7 +95,7 @@ def train(args,
     else:
         locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
         nmsed_out = fluid.layers.detection_output(
-            locs, confs, box, box_var, nms_threshold=0.45)
+            locs, confs, box, box_var, nms_threshold=args.nms_threshold)
         loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
                                      box_var)
         loss = fluid.layers.reduce_sum(loss)
@@ -131,19 +137,74 @@ def train(args,
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
         reader.test(data_args, val_file_list), batch_size=batch_size)
-    feeder = fluid.DataFeeder(
-        place=place, feed_list=[image, gt_box, gt_label, difficult])
+    if data_args.dataset == 'coco':
+        feeder = fluid.DataFeeder(
+            place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
+    elif data_args.dataset == 'pascalvoc':
+        feeder = fluid.DataFeeder(
+            place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test(pass_id):
-        _, accum_map = map_eval.get_map_var()
-        map_eval.reset(exe)
-        test_map = None
-        for _, data in enumerate(test_reader()):
-            test_map = exe.run(test_program,
-                               feed=feeder.feed(data),
-                               fetch_list=[accum_map])
-        print("Test {0}, map {1}".format(pass_id, test_map[0]))
-
+        if data_args.dataset == 'coco':
+            dts_res = []
+            import json
+
+            for batch_id, data in enumerate(test_reader()):
+                nmsed_out_v = exe.run(fluid.default_main_program(),
+                                        feed=feeder.feed(data),
+                                        fetch_list=[nmsed_out],
+                                        return_numpy=False)
+                lod = nmsed_out_v[0].lod()[0]
+                nmsed_out_v = np.array(nmsed_out_v[0])
+                real_batch_size = min(batch_size, len(data))
+                assert (len(lod) == real_batch_size + 1), \
+                "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
+                k = 0
+                for i in range(real_batch_size):
+                    dt_num_this_img = lod[i + 1] - lod[i]
+                    image_id = int(data[i][4][0])
+                    image_width = int(data[i][4][1])
+                    image_height = int(data[i][4][2])
+                    for j in range(dt_num_this_img):
+                        dt = nmsed_out_v[k]
+                        k = k + 1
+                        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+                        xmin = max(min(xmin, 1.0), 0.0) * image_width
+                        ymin = max(min(ymin, 1.0), 0.0) * image_height
+                        xmax = max(min(xmax, 1.0), 0.0) * image_width
+                        ymax = max(min(ymax, 1.0), 0.0) * image_height
+                        w = xmax - xmin
+                        h = ymax - ymin
+                        bbox = [xmin, ymin, w, h]
+                        dt_res = {
+                            'image_id' : image_id,
+                            'category_id' : category_id,
+                            'bbox' : bbox,
+                            'score' : score
+                        }
+                        dts_res.append(dt_res)
+                with open("detection_result.json", 'w') as outfile:
+                    json.dump(dts_res, outfile)
+                print("start evaluate using coco api")
+                from pycocotools.coco import COCO
+                from pycocotools.cocoeval import COCOeval
+                cocoGt=COCO(os.path.join(args.data_dir,args.val_file_list))
+                cocoDt=cocoGt.loadRes("detection_result.json")
+                cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
+                cocoEval.evaluate()
+                cocoEval.accumulate()
+                cocoEval.summarize()
+
+        elif data_args.dataset == 'pascalvoc':
+            _, accum_map = map_eval.get_map_var()
+            map_eval.reset(exe)
+            test_map = None
+            for _, data in enumerate(test_reader()):
+                test_map = exe.run(test_program,
+                                   feed=feeder.feed(data),
+                                   fetch_list=[accum_map])
+            print("Test {0}, map {1}".format(pass_id, test_map[0]))
+    test(-1)
     for pass_id in range(num_passes):
         start_time = time.time()
         prev_start_time = start_time

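Note: patch 17 swaps the DetectionMAP evaluator for COCO's own scorer when training on COCO: normalized detections are clipped to [0, 1], rescaled to pixels, converted to [x, y, w, h], dumped to JSON, and fed to pycocotools. A condensed sketch of that path; the annotation path and output filename mirror the patch but are assumptions about the local layout:

```python
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

def to_coco_box(xmin, ymin, xmax, ymax, img_w, img_h):
    # Clip normalized corners to [0, 1], scale to pixels, return [x, y, w, h].
    clip = lambda v: max(min(v, 1.0), 0.0)
    xmin, ymin = clip(xmin) * img_w, clip(ymin) * img_h
    xmax, ymax = clip(xmax) * img_w, clip(ymax) * img_h
    return [xmin, ymin, xmax - xmin, ymax - ymin]

def coco_eval(dts_res, gt_ann_file='annotations/instances_val2017.json'):
    # dts_res: list of {'image_id', 'category_id', 'bbox', 'score'} dicts.
    with open('detection_result.json', 'w') as outfile:
        json.dump(dts_res, outfile)
    coco_gt = COCO(gt_ann_file)
    coco_dt = coco_gt.loadRes('detection_result.json')
    ev = COCOeval(coco_gt, coco_dt, 'bbox')
    ev.evaluate()
    ev.accumulate()
    ev.summarize()
```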
From 8c64c9cd733cd6a14a62f98bdf8c416d23d15c67 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 13 Apr 2018 16:57:13 +0800
Subject: [PATCH 18/40] add COCO year version arg: coco2014 or coco2017

---
 fluid/object_detection/reader.py | 26 +++++++++++-----------
 fluid/object_detection/train.py  | 37 +++++++++++++++++---------------
 2 files changed, 33 insertions(+), 30 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 3d4823e82b..a05c4b479c 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -30,7 +30,7 @@ def __init__(self, dataset, toy, data_dir, label_file, resize_h, resize_w,
         self._dataset = dataset
         self._toy = toy
         self._data_dir = data_dir
-        if dataset == "pascalvoc":
+        if 'pascalvoc' in dataset:
             self._label_list = []
             label_fpath = os.path.join(data_dir, label_file)
             for line in open(label_fpath):
@@ -96,7 +96,7 @@ def img_mean(self):
 
 def _reader_creator(settings, file_list, mode, shuffle):
     def reader():
-        if settings.dataset == 'coco':
+        if 'coco'in settings.dataset:
             # cocoapi 
             from pycocotools.coco import COCO
             from pycocotools.cocoeval import COCOeval
@@ -108,7 +108,7 @@ def reader():
             category_names = [
                 item['name'] for item in coco.loadCats(category_ids)
             ]
-        elif settings.dataset == 'pascalvoc':
+        elif 'pascalvoc' in settings.dataset:
             flist = open(file_list)
             images = [line.strip() for line in flist]
 
@@ -122,10 +122,10 @@ def reader():
             random.shuffle(images)
 
         for image in images:
-            if settings.dataset == 'coco':
+            if 'coco'in settings.dataset:
                 image_name = image['file_name']
                 image_path = os.path.join(settings.data_dir, image_name)
-            elif settings.dataset == 'pascalvoc':
+            elif 'pascalvoc' in settings.dataset:
                 if mode == 'train' or mode == 'test':
                     image_path, label_path = image.split()
                     image_path = os.path.join(settings.data_dir, image_path)
@@ -140,7 +140,7 @@ def reader():
             img_id = image['id']
 
             if mode == 'train' or mode == 'test':
-                if settings.dataset == 'coco':
+                if 'coco'in settings.dataset:
                     # layout: label | xmin | ymin | xmax | ymax | iscrowd | area | image_id | category_id
                     bbox_labels = []
                     annIds = coco.getAnnIds(imgIds=image['id'])
@@ -167,7 +167,7 @@ def reader():
                         #bbox_sample.append(ann['bbox'])
                         #bbox_sample.append(ann['segmentation'])
                         bbox_labels.append(bbox_sample)
-                elif settings.dataset == 'pascalvoc':
+                elif 'pascalvoc' in settings.dataset:
                     # layout: label | xmin | ymin | xmax | ymax | difficult
                     bbox_labels = []
                     root = xml.etree.ElementTree.parse(label_path).getroot()
@@ -254,13 +254,13 @@ def reader():
                 sample_labels = np.array(sample_labels)
                 if len(sample_labels) == 0:
                     continue
-                if settings.dataset == 'coco':
+                if 'coco'in settings.dataset:
                     yield img.astype('float32'), \
                         sample_labels[:, 1:5], \
                         sample_labels[:, 0].astype('int32'), \
                         sample_labels[:, 5].astype('int32'), \
                         [img_id, img_width, img_height]
-                elif settings.dataset == 'pascalvoc':
+                elif 'pascalvoc' in settings.dataset:
                     yield img.astype(
                         'float32'
                     ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
@@ -304,7 +304,7 @@ def draw_bounding_box_on_image(image,
 
 def train(settings, file_list, shuffle=True):
     file_list = os.path.join(settings.data_dir, file_list)
-    if settings.dataset == 'coco':
+    if 'coco'in settings.dataset:
         train_settings = copy.copy(settings)
         if '2014' in file_list:
             sub_dir = "train2014"
@@ -312,13 +312,13 @@ def train(settings, file_list, shuffle=True):
             sub_dir = "train2017"
         train_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
         return _reader_creator(train_settings, file_list, 'train', shuffle)
-    elif settings.dataset == 'pascalvoc':
+    elif 'pascalvoc' in settings.dataset:
         return _reader_creator(settings, file_list, 'train', shuffle)
 
 
 def test(settings, file_list):
     file_list = os.path.join(settings.data_dir, file_list)
-    if settings.dataset == 'coco':
+    if 'coco'in settings.dataset:
         test_settings = copy.copy(settings)
         if '2014' in file_list:
             sub_dir = "val2014"
@@ -326,7 +326,7 @@ def test(settings, file_list):
             sub_dir = "val2017"
         test_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
         return _reader_creator(test_settings, file_list, 'test', False)
-    elif settings.dataset == 'pascalvoc':
+    elif 'pascalvoc' in settings.dataset:
         return _reader_creator(settings, file_list, 'test', False)
 
 
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 388c9efe2a..d7710f7752 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -17,10 +17,8 @@
 add_arg('learning_rate',    float, 0.001,     "Learning rate.")
 add_arg('batch_size',       int,   32,        "Minibatch size.")
 add_arg('num_passes',       int,   25,        "Epoch number.")
-add_arg('parallel',         bool,  True,      "Whether use parallel training.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
-add_arg('use_nccl',         bool,  False,     "Whether use NCCL.")
-add_arg('dataset',          str, 'pascalvoc', "coco or pascalvoc.")
+add_arg('dataset',          str, 'coco2014',  "coco2014, coco2017, and pascalvoc.")
 add_arg('model_save_dir',   str, 'model',     "The path to save model.")
 add_arg('pretrained_model', str, 'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
 add_arg('apply_distort',    bool, True,   "Whether apply distort")
@@ -43,9 +41,9 @@ def parallel_exe(args,
                  model_save_dir,
                  pretrained_model=None):
     image_shape = [3, data_args.resize_h, data_args.resize_w]
-    if data_args.dataset == 'coco':
+    if 'coco' in data_args.dataset:
         num_classes = 91
-    elif data_args.dataset == 'pascalvoc':
+    elif 'pascalvoc' in data_args.dataset:
         num_classes = 21
 
     image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
@@ -79,13 +77,13 @@ def parallel_exe(args,
             evaluate_difficult=False,
             ap_version='integral')
 
-    if data_args.dataset == 'coco':
+    if 'coco' in data_args.dataset:
         # learning rate decay in 12, 19 pass, respectively
-        if '2014' in train_file_list:
+        if '2014' in data_args.dataset:
             boundaries = [82783 / batch_size * 12, 82783 / batch_size * 19]
-        elif '2017' in train_file_list:
+        elif '2017' in data_args.dataset:
             boundaries = [118287 / batch_size * 12, 118287 / batch_size * 19]
-    elif data_args.dataset == 'pascalvoc':
+    elif 'pascalvoc' in data_args.dataset:
         boundaries = [40000, 60000]
     values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25]
     optimizer = fluid.optimizer.RMSProp(
@@ -110,15 +108,15 @@ def if_exist(var):
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
         reader.test(data_args, val_file_list), batch_size=batch_size)
-    if data_args.dataset == 'coco':
+    if 'coco' in data_args.dataset:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
-    elif data_args.dataset == 'pascalvoc':
+    elif 'pascalvoc' in data_args.dataset:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test(pass_id):
-        if data_args.dataset == 'coco':
+        if 'coco' in data_args.dataset:
             dts_res = []
             import json
 
@@ -169,7 +167,7 @@ def test(pass_id):
             cocoEval.accumulate()
             cocoEval.summarize()
 
-        elif data_args.dataset == 'pascalvoc':
+        elif 'pascalvoc' in data_args.dataset:
             _, accum_map = map_eval.get_map_var()
             map_eval.reset(exe)
             test_map = None
@@ -209,10 +207,15 @@ def test(pass_id):
     val_file_list = 'test.txt'
     label_file = 'label_list'
     model_save_dir = args.model_save_dir
-    if args.dataset == 'coco':
-        data_dir = './data/COCO17'
-        train_file_list = 'annotations/instances_train2017.json'
-        val_file_list = 'annotations/instances_val2017.json'
+    if 'coco' in args.dataset:
+        if '2014' in args.dataset:
+            data_dir = './data/coco'
+            train_file_list = 'annotations/instances_train2014.json'
+            val_file_list = 'annotations/instances_val2014.json'
+        elif '2017' in args.dataset:
+            data_dir = './data/COCO17'
+            train_file_list = 'annotations/instances_train2017.json'
+            val_file_list = 'annotations/instances_val2017.json'
 
     data_args = reader.Settings(
         dataset=args.dataset,

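Note: from patch 18 on, the `dataset` argument carries the year (`coco2014`, `coco2017`, `pascalvoc`) and is matched by substring, which also selects the annotation files and data directory. A sketch of that resolution; the COCO values mirror the defaults in this patch (patch 19 later unifies the COCO data_dir), and the PASCAL VOC entries are placeholders not shown here:

```python
def resolve_dataset(dataset):
    if 'coco' in dataset:
        year = '2014' if '2014' in dataset else '2017'
        return {
            'data_dir': './data/coco' if year == '2014' else './data/COCO17',
            'train_file_list': 'annotations/instances_train%s.json' % year,
            'val_file_list': 'annotations/instances_val%s.json' % year,
        }
    elif 'pascalvoc' in dataset:
        # Placeholder values; the PASCAL VOC defaults are not part of this patch.
        return {
            'data_dir': './data/pascalvoc',
            'train_file_list': 'trainval.txt',
            'val_file_list': 'test.txt',
        }

print(resolve_dataset('coco2017'))
```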
From 61adbdbfcf0c4b78d5a4e7bd157418d2fae5c2c7 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 13 Apr 2018 20:23:23 +0800
Subject: [PATCH 19/40] add COCO dataset download script and update README.md

---
 fluid/object_detection/README.md             |  6 +++---
 fluid/object_detection/data/coco/download.sh | 20 ++++++++++++++++++++
 fluid/object_detection/train.py              | 20 ++++++++++----------
 3 files changed, 33 insertions(+), 13 deletions(-)
 create mode 100644 fluid/object_detection/data/coco/download.sh

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 67eccaed73..504856fc57 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -6,7 +6,7 @@ The minimum PaddlePaddle version needed for the code sample in this directory is
 
 ### Introduction
 
-[Single Shot MultiBox Detector (SSD)](https://arxiv.org/abs/1512.02325) framework for object detection is based on a feed-forward convolutional network. The early network is a standard convolutional architecture for image classification, such as VGG, ResNet, or MobileNet, which is als called base network. In this tutorial we used [MobileNet](https://arxiv.org/abs/1704.04861).
+[Single Shot MultiBox Detector (SSD)](https://arxiv.org/abs/1512.02325) framework for object detection is based on a feed-forward convolutional network. The early network is a standard convolutional architecture for image classification, such as VGG, ResNet, or MobileNet, which is also called base network. In this tutorial we used [MobileNet](https://arxiv.org/abs/1704.04861).
 
 ### Data Preparation
 
@@ -63,11 +63,11 @@ Declaration: the MobileNet-v1 SSD model is converted by [TensorFlow model](https
 #### Train on MS-COCO
   - Train on one device (/GPU).
   ```python
-  env CUDA_VISIABLE_DEVICES=0 python -u train.py --parallel=False --data='coco' --pretrained_model='pretrained/mobilenet_imagenet/'
+  env CUDA_VISIBLE_DEVICES=0 python -u train.py --parallel=False --data='coco2014' --pretrained_model='pretrained/mobilenet_imagenet/'
   ```
   - Train on multi devices (/GPUs).
   ```python
-  env CUDA_VISIABLE_DEVICES=0,1 python -u train.py --batch_size=64 --data='coco' --pretrained_model='pretrained/mobilenet_imagenet/'
+  env CUDA_VISIBLE_DEVICES=0,1 python -u train.py --batch_size=64 --data='coco2014' --pretrained_model='pretrained/mobilenet_imagenet/'
   ```
 
 TBD
diff --git a/fluid/object_detection/data/coco/download.sh b/fluid/object_detection/data/coco/download.sh
new file mode 100644
index 0000000000..cd6e18c8e5
--- /dev/null
+++ b/fluid/object_detection/data/coco/download.sh
@@ -0,0 +1,20 @@
+DIR="$( cd "$(dirname "$0")" ; pwd -P )"
+cd "$DIR"
+
+# Download the data.
+echo "Downloading..."
+wget http://images.cocodataset.org/zips/train2014.zip
+wget http://images.cocodataset.org/zips/val2014.zip
+wget http://images.cocodataset.org/zips/train2017.zip
+wget http://images.cocodataset.org/zips/val2017.zip
+wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
+wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+# Extract the data.
+echo "Extractint..."
+unzip train2014.tar
+unzip val2014.tar
+unzip train2017.tar
+unzip val2017.tar
+unzip annotations_trainval2014.tar
+unzip annotations_trainval2017.tar
+
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index d7710f7752..ed8b4c2f91 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -18,17 +18,18 @@
 add_arg('batch_size',       int,   32,        "Minibatch size.")
 add_arg('num_passes',       int,   25,        "Epoch number.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
-add_arg('dataset',          str, 'coco2014',  "coco2014, coco2017, and pascalvoc.")
-add_arg('model_save_dir',   str, 'model',     "The path to save model.")
-add_arg('pretrained_model', str, 'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
-add_arg('apply_distort',    bool, True,   "Whether apply distort")
-add_arg('apply_expand',     bool, False,  "Whether appley expand")
-add_arg('resize_h',         int,  300,    "resize image size")
-add_arg('resize_w',         int,  300,    "resize image size")
+add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017, and pascalvoc.")
+add_arg('model_save_dir',   str,   'model',     "The path to save model.")
+add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
+add_arg('apply_distort',    bool,  True,   "Whether apply distort")
+add_arg('apply_expand',     bool,  False,  "Whether appley expand")
+add_arg('nms_threshold',    float, 0.5,    "resize image size")
+add_arg('resize_h',         int,   300,    "resize image size")
+add_arg('resize_w',         int,   300,    "resize image size")
 add_arg('mean_value_B',     float, 127.5, "mean value which will be subtracted")  #123.68
 add_arg('mean_value_G',     float, 127.5, "mean value which will be subtracted")  #116.78
 add_arg('mean_value_R',     float, 127.5, "mean value which will be subtracted")  #103.94
-add_arg('is_toy',           int, 0, "Toy for quick debug, 0 means using all data, while n means using only n sample")
+add_arg('is_toy',           int,   0, "Toy for quick debug, 0 means using all data, while n means using only n sample")
 # yapf: disable
 
 def parallel_exe(args,
@@ -208,12 +209,11 @@ def test(pass_id):
     label_file = 'label_list'
     model_save_dir = args.model_save_dir
     if 'coco' in args.dataset:
+        data_dir = './data/coco'
         if '2014' in args.dataset:
-            data_dir = './data/coco'
             train_file_list = 'annotations/instances_train2014.json'
             val_file_list = 'annotations/instances_val2014.json'
         elif '2017' in args.dataset:
-            data_dir = './data/COCO17'
             train_file_list = 'annotations/instances_train2017.json'
             val_file_list = 'annotations/instances_val2017.json'
 

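Note: after running data/coco/download.sh, reader.py expects the images under train2014/, val2014/, train2017/ and val2017/, and the JSON files under annotations/, all inside the configured data_dir. A tiny sketch to sanity-check that layout; the root path is the assumed default:

```python
import os

# Sub-directory names come from reader.py; the root is the assumed default data_dir.
expected = ['train2014', 'val2014', 'train2017', 'val2017', 'annotations']
root = './data/coco'
for name in expected:
    path = os.path.join(root, name)
    print(name, 'found' if os.path.isdir(path) else 'missing')
```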
From b016bc1ebc34233d27381c5ccc83f951d0f8ddf7 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Mon, 16 Apr 2018 16:35:12 +0800
Subject: [PATCH 20/40] restore parallel_do training path and fix reader spacing

---
 fluid/object_detection/reader.py |  17 ++--
 fluid/object_detection/train.py  | 129 ++++++++++++++++++++++++++++++-
 2 files changed, 136 insertions(+), 10 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index a05c4b479c..22a39b68fe 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -96,7 +96,7 @@ def img_mean(self):
 
 def _reader_creator(settings, file_list, mode, shuffle):
     def reader():
-        if 'coco'in settings.dataset:
+        if 'coco' in settings.dataset:
             # cocoapi 
             from pycocotools.coco import COCO
             from pycocotools.cocoeval import COCOeval
@@ -122,7 +122,7 @@ def reader():
             random.shuffle(images)
 
         for image in images:
-            if 'coco'in settings.dataset:
+            if 'coco' in settings.dataset:
                 image_name = image['file_name']
                 image_path = os.path.join(settings.data_dir, image_name)
             elif 'pascalvoc' in settings.dataset:
@@ -140,7 +140,7 @@ def reader():
             img_id = image['id']
 
             if mode == 'train' or mode == 'test':
-                if 'coco'in settings.dataset:
+                if 'coco' in settings.dataset:
                     # layout: label | xmin | ymin | xmax | ymax | iscrowd | area | image_id | category_id
                     bbox_labels = []
                     annIds = coco.getAnnIds(imgIds=image['id'])
@@ -148,9 +148,8 @@ def reader():
                     for ann in anns:
                         bbox_sample = []
                         # start from 1, leave 0 to background
-                        bbox_sample.append(
-                            float(ann['category_id']))
-                            #float(category_ids.index(ann['category_id'])) + 1)
+                        bbox_sample.append(float(ann['category_id']))
+                        #float(category_ids.index(ann['category_id'])) + 1)
                         bbox = ann['bbox']
                         xmin, ymin, w, h = bbox
                         xmax = xmin + w
@@ -254,7 +253,7 @@ def reader():
                 sample_labels = np.array(sample_labels)
                 if len(sample_labels) == 0:
                     continue
-                if 'coco'in settings.dataset:
+                if 'coco' in settings.dataset:
                     yield img.astype('float32'), \
                         sample_labels[:, 1:5], \
                         sample_labels[:, 0].astype('int32'), \
@@ -304,7 +303,7 @@ def draw_bounding_box_on_image(image,
 
 def train(settings, file_list, shuffle=True):
     file_list = os.path.join(settings.data_dir, file_list)
-    if 'coco'in settings.dataset:
+    if 'coco' in settings.dataset:
         train_settings = copy.copy(settings)
         if '2014' in file_list:
             sub_dir = "train2014"
@@ -318,7 +317,7 @@ def train(settings, file_list, shuffle=True):
 
 def test(settings, file_list):
     file_list = os.path.join(settings.data_dir, file_list)
-    if 'coco'in settings.dataset:
+    if 'coco' in settings.dataset:
         test_settings = copy.copy(settings)
         if '2014' in file_list:
             sub_dir = "val2014"
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index ed8b4c2f91..a469fe516b 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -23,7 +23,7 @@
 add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
 add_arg('apply_distort',    bool,  True,   "Whether apply distort")
 add_arg('apply_expand',     bool,  False,  "Whether appley expand")
-add_arg('nms_threshold',    float, 0.5,    "resize image size")
+add_arg('nms_threshold',    float, 0.5,    "nms threshold")
 add_arg('resize_h',         int,   300,    "resize image size")
 add_arg('resize_w',         int,   300,    "resize image size")
 add_arg('mean_value_B',     float, 127.5, "mean value which will be subtracted")  #123.68
@@ -32,6 +32,133 @@
 add_arg('is_toy',           int,   0, "Toy for quick debug, 0 means using all data, while n means using only n sample")
 # yapf: disable
 
+
+def parallel_do(args,
+                train_file_list,
+                val_file_list,
+                data_args,
+                learning_rate,
+                batch_size,
+                num_passes,
+                model_save_dir,
+                pretrained_model=None):
+    image_shape = [3, data_args.resize_h, data_args.resize_w]
+    if data_args.dataset == 'coco':
+        num_classes = 81
+    elif data_args.dataset == 'pascalvoc':
+        num_classes = 21
+
+    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
+    gt_box = fluid.layers.data(
+        name='gt_box', shape=[4], dtype='float32', lod_level=1)
+    gt_label = fluid.layers.data(
+        name='gt_label', shape=[1], dtype='int32', lod_level=1)
+    difficult = fluid.layers.data(
+        name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
+
+    if args.parallel:
+        places = fluid.layers.get_places()
+        pd = fluid.layers.ParallelDo(places, use_nccl=args.use_nccl)
+        with pd.do():
+            image_ = pd.read_input(image)
+            gt_box_ = pd.read_input(gt_box)
+            gt_label_ = pd.read_input(gt_label)
+            difficult_ = pd.read_input(difficult)
+            locs, confs, box, box_var = mobile_net(num_classes, image_,
+                                                   image_shape)
+            loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_, box,
+                                         box_var)
+            nmsed_out = fluid.layers.detection_output(
+                locs, confs, box, box_var, nms_threshold=0.45)
+            loss = fluid.layers.reduce_sum(loss)
+            pd.write_output(loss)
+            pd.write_output(nmsed_out)
+
+        loss, nmsed_out = pd()
+        loss = fluid.layers.mean(loss)
+    else:
+        locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
+        nmsed_out = fluid.layers.detection_output(
+            locs, confs, box, box_var, nms_threshold=0.45)
+        loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
+                                     box_var)
+        loss = fluid.layers.reduce_sum(loss)
+
+    test_program = fluid.default_main_program().clone(for_test=True)
+    with fluid.program_guard(test_program):
+        map_eval = fluid.evaluator.DetectionMAP(
+            nmsed_out,
+            gt_label,
+            gt_box,
+            difficult,
+            num_classes,
+            overlap_threshold=0.5,
+            evaluate_difficult=False,
+            ap_version='integral')
+
+    if data_args.dataset == 'coco':
+        # learning rate decay in 12, 19 pass, respectively
+        if '2014' in train_file_list:
+            boundaries = [82783 / batch_size * 12, 82783 / batch_size * 19]
+        elif '2017' in train_file_list:
+            boundaries = [118287 / batch_size * 12, 118287 / batch_size * 19]
+    elif data_args.dataset == 'pascalvoc':
+        boundaries = [40000, 60000]
+    values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25]
+    optimizer = fluid.optimizer.RMSProp(
+        learning_rate=fluid.layers.piecewise_decay(boundaries, values),
+        regularization=fluid.regularizer.L2Decay(0.00005), )
+
+    optimizer.minimize(loss)
+
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+
+    if pretrained_model:
+        def if_exist(var):
+            return os.path.exists(os.path.join(pretrained_model, var.name))
+        fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
+
+    train_reader = paddle.batch(
+        reader.train(data_args, train_file_list), batch_size=batch_size)
+    test_reader = paddle.batch(
+        reader.test(data_args, val_file_list), batch_size=batch_size)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=[image, gt_box, gt_label, difficult])
+
+    def test(pass_id):
+        _, accum_map = map_eval.get_map_var()
+        map_eval.reset(exe)
+        test_map = None
+        for _, data in enumerate(test_reader()):
+            test_map = exe.run(test_program,
+                               feed=feeder.feed(data),
+                               fetch_list=[accum_map])
+        print("Test {0}, map {1}".format(pass_id, test_map[0]))
+
+    for pass_id in range(num_passes):
+        start_time = time.time()
+        prev_start_time = start_time
+        end_time = 0
+        for batch_id, data in enumerate(train_reader()):
+            prev_start_time = start_time
+            start_time = time.time()
+            loss_v = exe.run(fluid.default_main_program(),
+                             feed=feeder.feed(data),
+                             fetch_list=[loss])
+            end_time = time.time()
+            if batch_id % 20 == 0:
+                print("Pass {0}, batch {1}, loss {2}, time {3}".format(
+                    pass_id, batch_id, loss_v[0], start_time - prev_start_time))
+        test(pass_id)
+
+        if pass_id % 10 == 0 or pass_id == num_passes - 1:
+            model_path = os.path.join(model_save_dir, str(pass_id))
+            print 'save models to %s' % (model_path)
+            fluid.io.save_persistables(exe, model_path)
+
+
 def parallel_exe(args,
                  train_file_list,
                  val_file_list,

From 9e3102321ac5c07c8e8a935a32d13c9b6eeb8220 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Tue, 17 Apr 2018 21:10:27 +0800
Subject: [PATCH 21/40] fix truncated-image IOError and mAP version selection

---
 fluid/object_detection/image_util.py |  2 ++
 fluid/object_detection/reader.py     | 10 +++++++---
 fluid/object_detection/train.py      | 22 ++++++++++++++--------
 3 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/fluid/object_detection/image_util.py b/fluid/object_detection/image_util.py
index e2b6630205..ed801852ea 100644
--- a/fluid/object_detection/image_util.py
+++ b/fluid/object_detection/image_util.py
@@ -1,8 +1,10 @@
 from PIL import Image, ImageEnhance, ImageDraw
+from PIL import ImageFile
 import numpy as np
 import random
 import math
 
+ImageFile.LOAD_TRUNCATED_IMAGES = True  # otherwise an IOError is raised when an image file is truncated
 
 class sampler():
     def __init__(self, max_sample, max_trial, min_scale, max_scale,
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 22a39b68fe..3e2f48258a 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -25,9 +25,10 @@
 
 
 class Settings(object):
-    def __init__(self, dataset, toy, data_dir, label_file, resize_h, resize_w,
+    def __init__(self, dataset, map_version, toy, data_dir, label_file, resize_h, resize_w,
                  mean_value, apply_distort, apply_expand):
         self._dataset = dataset
+        self._map_version = map_version
         self._toy = toy
         self._data_dir = data_dir
         if 'pascalvoc' in dataset:
@@ -58,6 +59,9 @@ def dataset(self):
         return self._dataset
 
     @property
+    def map_version(self):
+        return self._map_version
+    @property
     def toy(self):
         return self._toy
 
@@ -253,13 +257,13 @@ def reader():
                 sample_labels = np.array(sample_labels)
                 if len(sample_labels) == 0:
                     continue
-                if 'coco' in settings.dataset:
+                if 'cocoMAP' in settings.map_version:
                     yield img.astype('float32'), \
                         sample_labels[:, 1:5], \
                         sample_labels[:, 0].astype('int32'), \
                         sample_labels[:, 5].astype('int32'), \
                         [img_id, img_width, img_height]
-                elif 'pascalvoc' in settings.dataset:
+                else:
                     yield img.astype(
                         'float32'
                     ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index a469fe516b..4eb4ab45b7 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -2,7 +2,6 @@
 import paddle.fluid as fluid
 import reader
 import image_util
-import load_model as load_model
 from mobilenet_ssd import mobile_net
 from utility import add_arguments, print_arguments
 import os
@@ -24,6 +23,7 @@
 add_arg('apply_distort',    bool,  True,   "Whether apply distort")
 add_arg('apply_expand',     bool,  False,  "Whether apply expand")
 add_arg('nms_threshold',    float, 0.5,    "nms threshold")
+add_arg('map_version',      str,   'integral',   "integral, 11points, and cocoMAP")
 add_arg('resize_h',         int,   300,    "resize image size")
 add_arg('resize_w',         int,   300,    "resize image size")
 add_arg('mean_value_B',     float, 127.5, "mean value which will be subtracted")  #123.68
@@ -236,15 +236,15 @@ def if_exist(var):
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
         reader.test(data_args, val_file_list), batch_size=batch_size)
-    if 'coco' in data_args.dataset:
+    if 'cocoMAP' in data_args.map_version:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
-    elif 'pascalvoc' in data_args.dataset:
+    else:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test(pass_id):
-        if 'coco' in data_args.dataset:
+        if 'cocoMAP' in data_args.map_version:
             dts_res = []
             import json
 
@@ -253,6 +253,9 @@ def test(pass_id):
                                         feed=feeder.feed(data),
                                         fetch_list=[nmsed_out],
                                         return_numpy=False)
+                if batch_id % 20 == 0:
+                    print("Batch {0}".format(batch_id))
+
                 lod = nmsed_out_v[0].lod()[0]
                 nmsed_out_v = np.array(nmsed_out_v[0])
                 real_batch_size = min(batch_size, len(data))
@@ -295,21 +298,23 @@ def test(pass_id):
             cocoEval.accumulate()
             cocoEval.summarize()
 
-        elif 'pascalvoc' in data_args.dataset:
+        else:
             _, accum_map = map_eval.get_map_var()
             map_eval.reset(exe)
             test_map = None
-            for _, data in enumerate(test_reader()):
+            for batch_id, data in enumerate(test_reader()):
                 test_map = exe.run(test_program,
                                    feed=feeder.feed(data),
                                    fetch_list=[accum_map])
+                if batch_id % 20 == 0:
+                    print("Batch {0}".format(batch_id))
             print("Test {0}, map {1}".format(pass_id, test_map[0]))
+    #test(-1)
 
     for pass_id in range(num_passes):
         start_time = time.time()
         prev_start_time = start_time
         end_time = 0
-        test(pass_id)
         for batch_id, data in enumerate(train_reader()):
             prev_start_time = start_time
             start_time = time.time()
@@ -339,13 +344,14 @@ def test(pass_id):
         data_dir = './data/coco'
         if '2014' in args.dataset:
             train_file_list = 'annotations/instances_train2014.json'
-            val_file_list = 'annotations/instances_val2014.json'
+            val_file_list = 'annotations/instances_minival2014.json'
         elif '2017' in args.dataset:
             train_file_list = 'annotations/instances_train2017.json'
             val_file_list = 'annotations/instances_val2017.json'
 
     data_args = reader.Settings(
         dataset=args.dataset,
+        map_version = args.map_version,
         toy=args.is_toy,
         data_dir=data_dir,
         label_file=label_file,

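The key line in this patch is the single `ImageFile.LOAD_TRUNCATED_IMAGES = True` assignment in image_util.py. A minimal standalone sketch (not part of the patch) of the behaviour it enables:

```python
# Minimal sketch: why image_util.py sets ImageFile.LOAD_TRUNCATED_IMAGES.
# Without the flag, PIL raises "IOError: image file is truncated" while
# decoding a partially written JPEG; with it, the missing tail is padded
# and decoding succeeds.
from PIL import Image, ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True  # must be set before any image is decoded

def load_rgb(path):
    # Image.open() is lazy; the actual decode (and the potential IOError)
    # happens on convert()/load(), after the flag has taken effect.
    return Image.open(path).convert('RGB')
```
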
From 26063fd06084a0a8c00c61cf2bbd8c4f780dc88e Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 10:56:43 +0800
Subject: [PATCH 22/40] add test config

---
 fluid/object_detection/train.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 4eb4ab45b7..a2da62eb86 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -15,11 +15,13 @@
 # yapf: disable
 add_arg('learning_rate',    float, 0.001,     "Learning rate.")
 add_arg('batch_size',       int,   32,        "Minibatch size.")
-add_arg('num_passes',       int,   25,        "Epoch number.")
+#add_arg('num_passes',       int,   25,        "Epoch number.")
+add_arg('num_passes',       int,   0,        "Epoch number.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
 add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017, and pascalvoc.")
 add_arg('model_save_dir',   str,   'model',     "The path to save model.")
-add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
+#add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
+add_arg('pretrained_model', str,   'train_coco_pre/24/', "The init model path.")
 add_arg('apply_distort',    bool,  True,   "Whether apply distort")
 add_arg('apply_expand',     bool,  False,  "Whether apply expand")
 add_arg('nms_threshold',    float, 0.5,    "nms threshold")
@@ -309,7 +311,7 @@ def test(pass_id):
                 if batch_id % 20 == 0:
                     print("Batch {0}".format(batch_id))
             print("Test {0}, map {1}".format(pass_id, test_map[0]))
-    #test(-1)
+    test(-1)
 
     for pass_id in range(num_passes):
         start_time = time.time()
@@ -325,6 +327,7 @@ def test(pass_id):
             if batch_id % 20 == 0:
                 print("Pass {0}, batch {1}, loss {2}, time {3}".format(
                     pass_id, batch_id, loss_v, start_time - prev_start_time))
+	test(pass_id)
 
         if pass_id % 10 == 0 or pass_id == num_passes - 1:
             model_path = os.path.join(model_save_dir, str(pass_id))

From 863415d68c58d76887fe0daaf8c0637e394c9d8a Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 15:16:16 +0800
Subject: [PATCH 23/40] add eval.py to evaluate the trained model

---
 fluid/object_detection/eval.py  | 185 ++++++++++++++++++++++++++++++++
 fluid/object_detection/train.py |   4 +-
 2 files changed, 187 insertions(+), 2 deletions(-)
 create mode 100644 fluid/object_detection/eval.py

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
new file mode 100644
index 0000000000..18fbb48c0c
--- /dev/null
+++ b/fluid/object_detection/eval.py
@@ -0,0 +1,185 @@
+import os
+import time
+import numpy as np
+import argparse
+import functools
+
+import paddle
+import paddle.fluid as fluid
+import reader
+from mobilenet_ssd import mobile_net
+from utility import add_arguments, print_arguments
+
+parser = argparse.ArgumentParser(description=__doc__)
+add_arg = functools.partial(add_arguments, argparser=parser)
+# yapf: disable
+add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017, and pascalvoc.")
+add_arg('batch_size',       int,   32,        "Minibatch size.")
+add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
+add_arg('data_dir',         str,   '',        "The data root path.")
+add_arg('test_list',        str,   '',        "The testing data lists.")
+add_arg('model_dir',        str,   '',     "The path to save model.")
+add_arg('nms_threshold',    float, 0.5,    "nms threshold")
+add_arg('map_version',      str,   'integral',   "integral, 11points, and cocoMAP")
+add_arg('resize_h',         int,   300,    "resize image size")
+add_arg('resize_w',         int,   300,    "resize image size")
+add_arg('mean_value_B',     float, 127.5, "mean value for B channel which will be subtracted")  #123.68
+add_arg('mean_value_G',     float, 127.5, "mean value for G channel which will be subtracted")  #116.78
+add_arg('mean_value_R',     float, 127.5, "mean value for R channel which will be subtracted")  #103.94
+# yapf: enable
+
+
+def eval(args, data_args, test_list, batch_size, model_dir=None):
+    image_shape = [3, data_args.resize_h, data_args.resize_w]
+    if 'coco' in data_args.dataset:
+        num_classes = 91
+    elif 'pascalvoc' in data_args.dataset:
+        num_classes = 21
+
+    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
+    gt_box = fluid.layers.data(
+        name='gt_box', shape=[4], dtype='float32', lod_level=1)
+    gt_label = fluid.layers.data(
+        name='gt_label', shape=[1], dtype='int32', lod_level=1)
+    difficult = fluid.layers.data(
+        name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
+    gt_iscrowd = fluid.layers.data(
+        name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)
+    gt_image_info = fluid.layers.data(
+        name='gt_image_id', shape=[3], dtype='int32', lod_level=1)
+
+    locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
+    nmsed_out = fluid.layers.detection_output(
+        locs, confs, box, box_var, nms_threshold=args.nms_threshold)
+    loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
+    loss = fluid.layers.reduce_sum(loss)
+
+    test_program = fluid.default_main_program().clone(for_test=True)
+    with fluid.program_guard(test_program):
+        map_eval = fluid.evaluator.DetectionMAP(
+            nmsed_out,
+            gt_label,
+            gt_box,
+            difficult,
+            num_classes,
+            overlap_threshold=0.5,
+            evaluate_difficult=False,
+            ap_version=args.ap_version)
+
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+
+    if model_dir:
+
+        def if_exist(var):
+            return os.path.exists(os.path.join(model_dir, var.name))
+
+        fluid.io.load_vars(exe, model_dir, predicate=if_exist)
+
+    test_reader = paddle.batch(
+        reader.test(data_args, test_list), batch_size=batch_size)
+    if 'cocoMAP' in data_args.map_version:
+        feeder = fluid.DataFeeder(
+            place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
+    else:
+        feeder = fluid.DataFeeder(
+            place=place, feed_list=[image, gt_box, gt_label, difficult])
+
+    def test():
+        if 'cocoMAP' in data_args.map_version:
+            dts_res = []
+            import json
+
+            for batch_id, data in enumerate(test_reader()):
+                nmsed_out_v = exe.run(fluid.default_main_program(),
+                                        feed=feeder.feed(data),
+                                        fetch_list=[nmsed_out],
+                                        return_numpy=False)
+                if batch_id % 20 == 0:
+                    print("Batch {0}".format(batch_id))
+
+                lod = nmsed_out_v[0].lod()[0]
+                nmsed_out_v = np.array(nmsed_out_v[0])
+                real_batch_size = min(batch_size, len(data))
+                assert (len(lod) == real_batch_size + 1), \
+                "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
+                k = 0
+                for i in range(real_batch_size):
+                    dt_num_this_img = lod[i + 1] - lod[i]
+                    image_id = int(data[i][4][0])
+                    image_width = int(data[i][4][1])
+                    image_height = int(data[i][4][2])
+                    for j in range(dt_num_this_img):
+                        dt = nmsed_out_v[k]
+                        k = k + 1
+                        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+                        xmin = max(min(xmin, 1.0), 0.0) * image_width
+                        ymin = max(min(ymin, 1.0), 0.0) * image_height
+                        xmax = max(min(xmax, 1.0), 0.0) * image_width
+                        ymax = max(min(ymax, 1.0), 0.0) * image_height
+                        w = xmax - xmin
+                        h = ymax - ymin
+                        bbox = [xmin, ymin, w, h]
+                        dt_res = {
+                            'image_id' : image_id,
+                            'category_id' : category_id,
+                            'bbox' : bbox,
+                            'score' : score
+                        }
+                        dts_res.append(dt_res)
+            
+            with open("detection_result.json", 'w') as outfile:
+                json.dump(dts_res, outfile)
+            print("start evaluate using coco api")
+            from pycocotools.coco import COCO
+            from pycocotools.cocoeval import COCOeval
+            cocoGt=COCO(os.path.join(args.data_dir,args.val_file_list))
+            cocoDt=cocoGt.loadRes("detection_result.json")
+            cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
+            cocoEval.evaluate()
+            cocoEval.accumulate()
+            cocoEval.summarize()
+
+        else:
+            _, accum_map = map_eval.get_map_var()
+            map_eval.reset(exe)
+            for batch_id, data in enumerate(test_reader()):
+                test_map = exe.run(test_program,
+                                   feed=feeder.feed(data),
+                                   fetch_list=[accum_map])
+                if batch_id % 20 == 0:
+                    print("Batch {0}, map {1}".format(idx, test_map[0]))
+            print("Test model {0}, map {1}".format(model_dir, test_map[0]))
+    test()
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    print_arguments(args)
+
+    data_dir = 'data/pascalvoc'
+    test_list = 'test.txt'
+    label_file = 'label_list'
+    if 'coco' in args.dataset:
+        data_dir = './data/coco'
+        if '2014' in args.dataset:
+            test_list = 'annotations/instances_minival2014.json'
+        elif '2017' in args.dataset:
+            test_list = 'annotations/instances_val2017.json'
+
+    data_args = reader.Settings(
+        dataset=args.dataset,
+        map_version = args.map_version,
+        toy=0,
+        data_dir=data_dir,
+        label_file=label_file,
+        apply_distort=args.apply_distort,
+        apply_expand=args.apply_expand,
+        resize_h=args.resize_h,
+        resize_w=args.resize_w,
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
+    eval(
+        args,
+        test_list=args.test_list,
+        data_args=data_args,
+        batch_size=args.batch_size,
+        model_dir=args.model_dir)
\ No newline at end of file
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index a2da62eb86..2ca491b6d9 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -205,7 +205,7 @@ def parallel_exe(args,
             num_classes,
             overlap_threshold=0.5,
             evaluate_difficult=False,
-            ap_version='integral')
+            ap_version=args.ap_version)
 
     if 'coco' in data_args.dataset:
         # learning rate decay in 12, 19 pass, respectively
@@ -327,7 +327,7 @@ def test(pass_id):
             if batch_id % 20 == 0:
                 print("Pass {0}, batch {1}, loss {2}, time {3}".format(
                     pass_id, batch_id, loss_v, start_time - prev_start_time))
-	test(pass_id)
+    test(pass_id)
 
         if pass_id % 10 == 0 or pass_id == num_passes - 1:
             model_path = os.path.join(model_save_dir, str(pass_id))

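The cocoMAP branch of the new eval.py turns each NMS output row into a COCO-style detection record. A hedged standalone restatement of that conversion (the helper name is illustrative, not from the patch):

```python
# Sketch of the conversion done in eval.py's cocoMAP branch: detection_output
# yields normalized corners [label, score, xmin, ymin, xmax, ymax], while the
# COCO results json expects absolute [x, y, width, height].
def to_coco_dt(dt, image_id, image_width, image_height):
    category_id, score, xmin, ymin, xmax, ymax = dt
    # clip to [0, 1] before scaling back to pixel coordinates
    xmin = max(min(xmin, 1.0), 0.0) * image_width
    ymin = max(min(ymin, 1.0), 0.0) * image_height
    xmax = max(min(xmax, 1.0), 0.0) * image_width
    ymax = max(min(ymax, 1.0), 0.0) * image_height
    return {
        'image_id': image_id,
        'category_id': int(category_id),
        'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
        'score': float(score),
    }
```
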
From 58cae078f0fee2daa0f26affd574e9d438ccc9b2 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 15:25:23 +0800
Subject: [PATCH 24/40] fix: rename map_version to ap_version and clean up eval settings

---
 fluid/object_detection/eval.py   | 12 ++++++------
 fluid/object_detection/reader.py | 10 +++++-----
 fluid/object_detection/train.py  |  8 ++++----
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 18fbb48c0c..9eb7e0be45 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -20,7 +20,7 @@
 add_arg('test_list',        str,   '',        "The testing data lists.")
 add_arg('model_dir',        str,   '',     "The path to save model.")
 add_arg('nms_threshold',    float, 0.5,    "nms threshold")
-add_arg('map_version',      str,   'integral',   "integral, 11points, and cocoMAP")
+add_arg('ap_version',       str,   'integral',   "integral, 11points, and cocoMAP")
 add_arg('resize_h',         int,   300,    "resize image size")
 add_arg('resize_w',         int,   300,    "resize image size")
 add_arg('mean_value_B',     float, 127.5, "mean value for B channel which will be subtracted")  #123.68
@@ -78,7 +78,7 @@ def if_exist(var):
 
     test_reader = paddle.batch(
         reader.test(data_args, test_list), batch_size=batch_size)
-    if 'cocoMAP' in data_args.map_version:
+    if 'cocoMAP' in data_args.ap_version:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
     else:
@@ -86,7 +86,7 @@ def if_exist(var):
             place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test():
-        if 'cocoMAP' in data_args.map_version:
+        if 'cocoMAP' in data_args.ap_version:
             dts_res = []
             import json
 
@@ -168,12 +168,12 @@ def test():
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        map_version = args.map_version,
+        ap_version = args.ap_version,
         toy=0,
         data_dir=data_dir,
         label_file=label_file,
-        apply_distort=args.apply_distort,
-        apply_expand=args.apply_expand,
+        apply_distort=False,
+        apply_expand=False,
         resize_h=args.resize_h,
         resize_w=args.resize_w,
         mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 3e2f48258a..502e2413b4 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -25,10 +25,10 @@
 
 
 class Settings(object):
-    def __init__(self, dataset, map_version, toy, data_dir, label_file, resize_h, resize_w,
+    def __init__(self, dataset, ap_version, toy, data_dir, label_file, resize_h, resize_w,
                  mean_value, apply_distort, apply_expand):
         self._dataset = dataset
-        self._map_version = map_version
+        self._ap_version = ap_version
         self._toy = toy
         self._data_dir = data_dir
         if 'pascalvoc' in dataset:
@@ -59,8 +59,8 @@ def dataset(self):
         return self._dataset
 
     @property
-    def map_version(self):
-        return self._map_version
+    def ap_version(self):
+        return self._ap_version
     @property
     def toy(self):
         return self._toy
@@ -257,7 +257,7 @@ def reader():
                 sample_labels = np.array(sample_labels)
                 if len(sample_labels) == 0:
                     continue
-                if 'cocoMAP' in settings.map_version:
+                if 'cocoMAP' in settings.ap_version:
                     yield img.astype('float32'), \
                         sample_labels[:, 1:5], \
                         sample_labels[:, 0].astype('int32'), \
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 2ca491b6d9..f4a6d12f5e 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -25,7 +25,7 @@
 add_arg('apply_distort',    bool,  True,   "Whether apply distort")
 add_arg('apply_expand',     bool,  False,  "Whether apply expand")
 add_arg('nms_threshold',    float, 0.5,    "nms threshold")
-add_arg('map_version',      str,   'integral',   "integral, 11points, and cocoMAP")
+add_arg('ap_version',       str,   'integral',   "integral, 11points, and cocoMAP")
 add_arg('resize_h',         int,   300,    "resize image size")
 add_arg('resize_w',         int,   300,    "resize image size")
 add_arg('mean_value_B',     float, 127.5, "mean value which will be subtracted")  #123.68
@@ -238,7 +238,7 @@ def if_exist(var):
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
         reader.test(data_args, val_file_list), batch_size=batch_size)
-    if 'cocoMAP' in data_args.map_version:
+    if 'cocoMAP' in data_args.ap_version:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
     else:
@@ -246,7 +246,7 @@ def if_exist(var):
             place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test(pass_id):
-        if 'cocoMAP' in data_args.map_version:
+        if 'cocoMAP' in data_args.ap_version:
             dts_res = []
             import json
 
@@ -354,7 +354,7 @@ def test(pass_id):
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        map_version = args.map_version,
+        ap_version = args.ap_version,
         toy=args.is_toy,
         data_dir=data_dir,
         label_file=label_file,

From 7ebd4bdab56965c7abfb81f4fcc15ac93732023d Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 16:14:23 +0800
Subject: [PATCH 25/40] fix bugs in the cocoMAP evaluation path

---
 fluid/object_detection/eval.py  | 28 ++++++++++++++--------------
 fluid/object_detection/train.py | 29 ++++++++++++++---------------
 2 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 9eb7e0be45..e827e0bafa 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -54,18 +54,6 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
     loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
     loss = fluid.layers.reduce_sum(loss)
 
-    test_program = fluid.default_main_program().clone(for_test=True)
-    with fluid.program_guard(test_program):
-        map_eval = fluid.evaluator.DetectionMAP(
-            nmsed_out,
-            gt_label,
-            gt_box,
-            difficult,
-            num_classes,
-            overlap_threshold=0.5,
-            evaluate_difficult=False,
-            ap_version=args.ap_version)
-
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
 
@@ -141,6 +129,18 @@ def test():
             cocoEval.summarize()
 
         else:
+            test_program = fluid.default_main_program().clone(for_test=True)
+            with fluid.program_guard(test_program):
+                map_eval = fluid.evaluator.DetectionMAP(
+                    nmsed_out,
+                    gt_label,
+                    gt_box,
+                    difficult,
+                    num_classes,
+                    overlap_threshold=0.5,
+                    evaluate_difficult=False,
+                    ap_version=args.ap_version)
+
             _, accum_map = map_eval.get_map_var()
             map_eval.reset(exe)
             for batch_id, data in enumerate(test_reader()):
@@ -148,7 +148,7 @@ def test():
                                    feed=feeder.feed(data),
                                    fetch_list=[accum_map])
                 if batch_id % 20 == 0:
-                    print("Batch {0}, map {1}".format(idx, test_map[0]))
+                    print("Batch {0}, map {1}".format(batch_id, test_map[0]))
             print("Test model {0}, map {1}".format(model_dir, test_map[0]))
     test()
 
@@ -179,7 +179,7 @@ def test():
         mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
     eval(
         args,
-        test_list=args.test_list,
         data_args=data_args,
+        test_list=args.test_list if len(args.test_list) > 0 else test_list,
         batch_size=args.batch_size,
         model_dir=args.model_dir)
\ No newline at end of file
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index f4a6d12f5e..c6fb34a40d 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -195,18 +195,6 @@ def parallel_exe(args,
                                  box_var)
     loss = fluid.layers.reduce_sum(loss)
 
-    test_program = fluid.default_main_program().clone(for_test=True)
-    with fluid.program_guard(test_program):
-        map_eval = fluid.evaluator.DetectionMAP(
-            nmsed_out,
-            gt_label,
-            gt_box,
-            difficult,
-            num_classes,
-            overlap_threshold=0.5,
-            evaluate_difficult=False,
-            ap_version=args.ap_version)
-
     if 'coco' in data_args.dataset:
         # learning rate decay in 12, 19 pass, respectively
         if '2014' in data_args.dataset:
@@ -301,16 +289,27 @@ def test(pass_id):
             cocoEval.summarize()
 
         else:
+            test_program = fluid.default_main_program().clone(for_test=True)
+            with fluid.program_guard(test_program):
+                map_eval = fluid.evaluator.DetectionMAP(
+                    nmsed_out,
+                    gt_label,
+                    gt_box,
+                    difficult,
+                    num_classes,
+                    overlap_threshold=0.5,
+                    evaluate_difficult=False,
+                    ap_version=args.ap_version)
+
             _, accum_map = map_eval.get_map_var()
             map_eval.reset(exe)
-            test_map = None
             for batch_id, data in enumerate(test_reader()):
                 test_map = exe.run(test_program,
                                    feed=feeder.feed(data),
                                    fetch_list=[accum_map])
                 if batch_id % 20 == 0:
-                    print("Batch {0}".format(batch_id))
-            print("Test {0}, map {1}".format(pass_id, test_map[0]))
+                    print("Batch {0}, map {1}".format(batch_id, test_map[0]))
+            print("Test model {0}, map {1}".format(model_dir, test_map[0]))
     test(-1)
 
     for pass_id in range(num_passes):

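For context on the assertion the cocoMAP test loops make (`len(lod) == real_batch_size + 1`): `detection_output` returns a LoDTensor whose level-0 offsets partition the flat list of detections by image. A small worked example, assuming a batch of three images:

```python
# LoD offsets for a batch of 3 images: lod[i+1] - lod[i] is the number of
# detections kept for image i, so len(lod) must equal batch_size + 1.
lod = [0, 3, 3, 7]                  # example offsets
assert len(lod) == 3 + 1
for i in range(len(lod) - 1):
    dt_num_this_img = lod[i + 1] - lod[i]
    print("image {}: {} detections".format(i, dt_num_this_img))
# -> image 0: 3 detections, image 1: 0 detections, image 2: 4 detections
```
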
From 6f4d939a27f5f0dc4dfca4da431cad8bd3cbcd0f Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 16:43:35 +0800
Subject: [PATCH 26/40] update README.md

---
 fluid/object_detection/README.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 504856fc57..d9d8904978 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -52,22 +52,22 @@ Declaration: the MobileNet-v1 SSD model is converted by [TensorFlow model](https
 #### Train on PASCAL VOC
   - Train on one device (/GPU).
   ```python
-  env CUDA_VISIABLE_DEVICES=0 python -u train.py --parallel=False --data='pascalvoc' --pretrained_model='pretrained/ssd_mobilenet_v1_coco/'
+  env CUDA_VISIABLE_DEVICES=0 python -u train.py --parallel=False --dataset='pascalvoc' --pretrained_model='pretrained/ssd_mobilenet_v1_coco/'
   ```
   - Train on multi devices (/GPUs).
 
   ```python
-  env CUDA_VISIABLE_DEVICES=0,1 python -u train.py --batch_size=64 --data='pascalvoc' --pretrained_model='pretrained/ssd_mobilenet_v1_coco/'
+  env CUDA_VISIABLE_DEVICES=0,1 python -u train.py --batch_size=64 --dataset='pascalvoc' --pretrained_model='pretrained/ssd_mobilenet_v1_coco/'
   ```
 
 #### Train on MS-COCO
   - Train on one device (/GPU).
   ```python
-  env CUDA_VISIABLE_DEVICES=0 python -u train.py --parallel=False --data='coco2014' --pretrained_model='pretrained/mobilenet_imagenet/'
+  env CUDA_VISIABLE_DEVICES=0 python -u train.py --parallel=False --dataset='coco2014' --pretrained_model='pretrained/mobilenet_imagenet/'
   ```
   - Train on multi devices (/GPUs).
   ```python
-  env CUDA_VISIABLE_DEVICES=0,1 python -u train.py --batch_size=64 --data='coco2014' --pretrained_model='pretrained/mobilenet_imagenet/'
+  env CUDA_VISIABLE_DEVICES=0,1 python -u train.py --batch_size=64 --dataset='coco2014' --pretrained_model='pretrained/mobilenet_imagenet/'
   ```
 
 TBD
@@ -75,7 +75,7 @@ TBD
 ### Evaluate
 
 ```python
-env CUDA_VISIABLE_DEVICES=0 python eval.py --model='model/90' --test_list=''
+env CUDA_VISIABLE_DEVICES=0 python eval.py --model_dir='model/90' --test_list=''
 ```
 
 TBD
@@ -83,7 +83,7 @@ TBD
 ### Infer and Visualize
 
 ```python
-env CUDA_VISIABLE_DEVICES=0 python infer.py --batch_size=2 --model='model/90' --test_list=''
+env CUDA_VISIABLE_DEVICES=0 python infer.py --batch_size=2 --model_dir='model/90' --test_list=''
 ```
 
 TBD

From 6bd3f3f3b36e4b91178b4d47b525f333047ac983 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 16:56:18 +0800
Subject: [PATCH 27/40] fix cocoMAP bug

---
 fluid/object_detection/eval.py  | 4 ++--
 fluid/object_detection/train.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index e827e0bafa..bd3550c3ff 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -121,7 +121,7 @@ def test():
             print("start evaluate using coco api")
             from pycocotools.coco import COCO
             from pycocotools.cocoeval import COCOeval
-            cocoGt=COCO(os.path.join(args.data_dir,args.val_file_list))
+            cocoGt=COCO(os.path.join(data_args.data_dir, test_list))
             cocoDt=cocoGt.loadRes("detection_result.json")
             cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
             cocoEval.evaluate()
@@ -170,7 +170,7 @@ def test():
         dataset=args.dataset,
         ap_version = args.ap_version,
         toy=0,
-        data_dir=data_dir,
+        data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
         label_file=label_file,
         apply_distort=False,
         apply_expand=False,
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index c6fb34a40d..9cf47f58a7 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -281,7 +281,7 @@ def test(pass_id):
             print("start evaluate using coco api")
             from pycocotools.coco import COCO
             from pycocotools.cocoeval import COCOeval
-            cocoGt=COCO(os.path.join(args.data_dir,args.val_file_list))
+            cocoGt=COCO(os.path.join(data_args.data_dir, val_file_list))
             cocoDt=cocoGt.loadRes("detection_result.json")
             cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
             cocoEval.evaluate()

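With the ground-truth annotation path fixed above, the cocoMAP flow in both train.py and eval.py reduces to the standard pycocotools sequence. A hedged summary of that flow, wrapped as an illustrative helper (the wrapper itself is not in the patches; the individual calls are):

```python
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

def coco_map(gt_annotation_file, dts_res, result_file="detection_result.json"):
    # dts_res: list of {'image_id', 'category_id', 'bbox', 'score'} records
    with open(result_file, 'w') as outfile:
        json.dump(dts_res, outfile)
    coco_gt = COCO(gt_annotation_file)      # e.g. instances_minival2014.json
    coco_dt = coco_gt.loadRes(result_file)
    coco_eval = COCOeval(coco_gt, coco_dt, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()                   # prints the AP/AR table
```
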
From 813fb391098857d730983c2cedc62f048bd03b08 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 20:34:20 +0800
Subject: [PATCH 28/40] work around strange behaviour with test_program =
 fluid.default_main_program().clone(for_test=True)

---
 fluid/object_detection/train.py | 116 ++++++++------------------------
 1 file changed, 27 insertions(+), 89 deletions(-)

diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 9cf47f58a7..dd25370861 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -15,17 +15,15 @@
 # yapf: disable
 add_arg('learning_rate',    float, 0.001,     "Learning rate.")
 add_arg('batch_size',       int,   32,        "Minibatch size.")
-#add_arg('num_passes',       int,   25,        "Epoch number.")
-add_arg('num_passes',       int,   0,        "Epoch number.")
+add_arg('num_passes',       int,   25,        "Epoch number.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
 add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017, and pascalvoc.")
 add_arg('model_save_dir',   str,   'model',     "The path to save model.")
-#add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
-add_arg('pretrained_model', str,   'train_coco_pre/24/', "The init model path.")
+add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
 add_arg('apply_distort',    bool,  True,   "Whether apply distort")
 add_arg('apply_expand',     bool,  False,  "Whether apply expand")
 add_arg('nms_threshold',    float, 0.5,    "nms threshold")
-add_arg('ap_version',       str,   'integral',   "integral, 11points, and cocoMAP")
+add_arg('ap_version',       str,   'integral',   "integral, 11points")
 add_arg('resize_h',         int,   300,    "resize image size")
 add_arg('resize_w',         int,   300,    "resize image size")
 add_arg('mean_value_B',     float, 127.5, "mean value which will be subtracted")  #123.68
@@ -195,6 +193,18 @@ def parallel_exe(args,
                                  box_var)
     loss = fluid.layers.reduce_sum(loss)
 
+    test_program = fluid.default_main_program().clone(for_test=True)
+    with fluid.program_guard(test_program):
+        map_eval = fluid.evaluator.DetectionMAP(
+            nmsed_out,
+            gt_label,
+            gt_box,
+            difficult,
+            num_classes,
+            overlap_threshold=0.5,
+            evaluate_difficult=False,
+            ap_version=args.ap_version)
+
     if 'coco' in data_args.dataset:
         # learning rate decay in 12, 19 pass, respectively
         if '2014' in data_args.dataset:
@@ -226,91 +236,19 @@ def if_exist(var):
         reader.train(data_args, train_file_list), batch_size=batch_size)
     test_reader = paddle.batch(
         reader.test(data_args, val_file_list), batch_size=batch_size)
-    if 'cocoMAP' in data_args.ap_version:
-        feeder = fluid.DataFeeder(
-            place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
-    else:
-        feeder = fluid.DataFeeder(
-            place=place, feed_list=[image, gt_box, gt_label, difficult])
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test(pass_id):
-        if 'cocoMAP' in data_args.ap_version:
-            dts_res = []
-            import json
-
-            for batch_id, data in enumerate(test_reader()):
-                nmsed_out_v = exe.run(fluid.default_main_program(),
-                                        feed=feeder.feed(data),
-                                        fetch_list=[nmsed_out],
-                                        return_numpy=False)
-                if batch_id % 20 == 0:
-                    print("Batch {0}".format(batch_id))
-
-                lod = nmsed_out_v[0].lod()[0]
-                nmsed_out_v = np.array(nmsed_out_v[0])
-                real_batch_size = min(batch_size, len(data))
-                assert (len(lod) == real_batch_size + 1), \
-                "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
-                k = 0
-                for i in range(real_batch_size):
-                    dt_num_this_img = lod[i + 1] - lod[i]
-                    image_id = int(data[i][4][0])
-                    image_width = int(data[i][4][1])
-                    image_height = int(data[i][4][2])
-                    for j in range(dt_num_this_img):
-                        dt = nmsed_out_v[k]
-                        k = k + 1
-                        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
-                        xmin = max(min(xmin, 1.0), 0.0) * image_width
-                        ymin = max(min(ymin, 1.0), 0.0) * image_height
-                        xmax = max(min(xmax, 1.0), 0.0) * image_width
-                        ymax = max(min(ymax, 1.0), 0.0) * image_height
-                        w = xmax - xmin
-                        h = ymax - ymin
-                        bbox = [xmin, ymin, w, h]
-                        dt_res = {
-                            'image_id' : image_id,
-                            'category_id' : category_id,
-                            'bbox' : bbox,
-                            'score' : score
-                        }
-                        dts_res.append(dt_res)
-            
-            with open("detection_result.json", 'w') as outfile:
-                json.dump(dts_res, outfile)
-            print("start evaluate using coco api")
-            from pycocotools.coco import COCO
-            from pycocotools.cocoeval import COCOeval
-            cocoGt=COCO(os.path.join(data_args.data_dir, val_file_list))
-            cocoDt=cocoGt.loadRes("detection_result.json")
-            cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
-            cocoEval.evaluate()
-            cocoEval.accumulate()
-            cocoEval.summarize()
-
-        else:
-            test_program = fluid.default_main_program().clone(for_test=True)
-            with fluid.program_guard(test_program):
-                map_eval = fluid.evaluator.DetectionMAP(
-                    nmsed_out,
-                    gt_label,
-                    gt_box,
-                    difficult,
-                    num_classes,
-                    overlap_threshold=0.5,
-                    evaluate_difficult=False,
-                    ap_version=args.ap_version)
-
-            _, accum_map = map_eval.get_map_var()
-            map_eval.reset(exe)
-            for batch_id, data in enumerate(test_reader()):
-                test_map = exe.run(test_program,
-                                   feed=feeder.feed(data),
-                                   fetch_list=[accum_map])
-                if batch_id % 20 == 0:
-                    print("Batch {0}, map {1}".format(batch_id, test_map[0]))
-            print("Test model {0}, map {1}".format(model_dir, test_map[0]))
-    test(-1)
+        _, accum_map = map_eval.get_map_var()
+        map_eval.reset(exe)
+        for batch_id, data in enumerate(test_reader()):
+            test_map = exe.run(test_program,
+                               feed=feeder.feed(data),
+                               fetch_list=[accum_map])
+            if batch_id % 20 == 0:
+                print("Batch {0}, map {1}".format(batch_id, test_map[0]))
+        print("Pass {0}, map {1}".format(pass_id, test_map[0]))
 
     for pass_id in range(num_passes):
         start_time = time.time()
@@ -326,7 +264,7 @@ def test(pass_id):
             if batch_id % 20 == 0:
                 print("Pass {0}, batch {1}, loss {2}, time {3}".format(
                     pass_id, batch_id, loss_v, start_time - prev_start_time))
-    test(pass_id)
+        test(pass_id)
 
         if pass_id % 10 == 0 or pass_id == num_passes - 1:
             model_path = os.path.join(model_save_dir, str(pass_id))

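The "strange behaviour" this patch works around is most likely the usual Fluid constraint that `clone(for_test=True)` must be taken before the optimizer appends backward and update ops; a clone taken later (as patch 25 did inside test()) still contains training-only operators. A hedged toy example of the ordering, assuming standard Fluid behaviour rather than code from this repo:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.reduce_mean(
    fluid.layers.square_error_cost(input=pred, label=y))

# Clone first: test_program then holds only the forward (inference) graph.
test_program = fluid.default_main_program().clone(for_test=True)

# Only afterwards add backward/update ops to the training program.
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
```
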
From 2d0d2aed12fef60d1c5a148d6cadb71d1b5b7b7b Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 22:33:55 +0800
Subject: [PATCH 29/40] add inference and visualization, as well as README.md updates

---
 fluid/object_detection/README.md |  28 +++++++-
 fluid/object_detection/infer.py  | 108 +++++++++++++++++++++++++++++++
 fluid/object_detection/reader.py |  55 ++++++----------
 3 files changed, 154 insertions(+), 37 deletions(-)
 create mode 100644 fluid/object_detection/infer.py

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index d9d8904978..62be1a5b69 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -75,17 +75,39 @@ TBD
 ### Evaluate
 
 ```python
-env CUDA_VISIABLE_DEVICES=0 python eval.py --model_dir='model/90' --test_list=''
+env CUDA_VISIABLE_DEVICES=0 python eval.py --model_dir='train_pascal_model/90' --test_list='' --ap_version='integral'
+env CUDA_VISIABLE_DEVICES=0 python eval.py --model_dir='train_coco_model/20' --ap_version='cocoMAP'
 ```
+You can evaluate your trained model with different metrics, such as 11point, integral, and cocoMAP, which is a special mAP metric used for the COCO dataset.
+Note that the default test list is the dataset's own test/val list; you can use your own list by setting the test_list argument.
 
 TBD
 
 ### Infer and Visualize
 
 ```python
-env CUDA_VISIABLE_DEVICES=0 python infer.py --batch_size=2 --model_dir='model/90' --test_list=''
+env CUDA_VISIABLE_DEVICES=0 python infer.py --model_dir='train_coco_model/20' --image_path='./data/coco/val2014/COCO_val2014_000000000139.jpg'
 ```
-
+Below are examples produced by running infer.py to run inference and visualize the model's results.
+<p align="center">
+<img src="images/COCO_val2014_000000000139.jpg" height=150 width=200 hspace='10'/>
+<img src="images/COCO_val2014_000000000785.jpg" height=150 width=200 hspace='10'/>
+<img src="images/COCO_val2014_000000000885.jpg" height=150 width=100 hspace='10'/>
+<img src="images/COCO_val2014_000000142324.jpg" height=150 width=200 hspace='10'/>
+<img src="images/COCO_val2014_000000144003.jpg" height=150 width=200 hspace='10'/> <br />
+MobileNet-SSD300x300 Visualization Examples
+</p>
+
+COCO_val2014_000000000139
+![COCO_val2014_000000000139.jpg](images/COCO_val2014_000000000139.jpg)
+COCO_val2014_000000000785
+![COCO_val2014_000000000785.jpg](images/COCO_val2014_000000000785.jpg)
+COCO_val2014_000000000885
+![COCO_val2014_000000000885.jpg](images/COCO_val2014_000000000885.jpg)
+COCO_val2014_000000142324
+![COCO_val2014_000000142324.jpg](images/COCO_val2014_000000142324.jpg)
+COCO_val2014_000000144003
+![COCO_val2014_000000144003.jpg](images/COCO_val2014_000000144003.jpg)
 TBD
 
 ### Released Model
diff --git a/fluid/object_detection/infer.py b/fluid/object_detection/infer.py
new file mode 100644
index 0000000000..bedd7aeaeb
--- /dev/null
+++ b/fluid/object_detection/infer.py
@@ -0,0 +1,108 @@
+import os
+import time
+import numpy as np
+import argparse
+import functools
+from PIL import Image
+from PIL import ImageDraw
+
+import paddle
+import paddle.fluid as fluid
+import reader
+from mobilenet_ssd import mobile_net
+from utility import add_arguments, print_arguments
+
+parser = argparse.ArgumentParser(description=__doc__)
+add_arg = functools.partial(add_arguments, argparser=parser)
+# yapf: disable
+add_arg('dataset',          str,   'coco',    "coco and pascalvoc.")
+add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
+add_arg('image_path',       str,   '',        "The image used to inference and visualize.")
+add_arg('model_dir',        str,   '',     "The model path.")
+add_arg('nms_threshold',    float, 0.5,    "nms threshold")
+add_arg('confs_threshold',  float, 0.2,    "confidence threshold for draw bbox")
+add_arg('resize_h',         int,   300,    "resize image size")
+add_arg('resize_w',         int,   300,    "resize image size")
+add_arg('mean_value_B',     float, 127.5,  "mean value for B channel which will be subtracted")  #123.68
+add_arg('mean_value_G',     float, 127.5,  "mean value for G channel which will be subtracted")  #116.78
+add_arg('mean_value_R',     float, 127.5,  "mean value for R channel which will be subtracted")  #103.94
+# yapf: enable
+
+def infer(args, data_args, image_path, model_dir):
+    image_shape = [3, data_args.resize_h, data_args.resize_w]
+    if 'coco' in data_args.dataset:
+        num_classes = 91
+    elif 'pascalvoc' in data_args.dataset:
+        num_classes = 21
+
+    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
+    locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
+    nmsed_out = fluid.layers.detection_output(
+        locs, confs, box, box_var, nms_threshold=args.nms_threshold)
+
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+
+    if model_dir:
+        def if_exist(var):
+            return os.path.exists(os.path.join(model_dir, var.name))
+        fluid.io.load_vars(exe, model_dir, predicate=if_exist)
+
+    infer_reader = reader.infer(data_args, image_path)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=[image])
+
+    def infer():
+        data = infer_reader()
+        nmsed_out_v = exe.run(fluid.default_main_program(),
+                                feed=feeder.feed([[data]]),
+                                fetch_list=[nmsed_out],
+                                return_numpy=False)
+        nmsed_out_v = np.array(nmsed_out_v[0])
+        draw_bounding_box_on_image(image_path, nmsed_out_v, args.confs_threshold)
+        for dt in nmsed_out_v:
+            category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+    infer()
+
+def draw_bounding_box_on_image(image_path, nms_out, confs_threshold):
+    image = Image.open(image_path)
+    draw = ImageDraw.Draw(image)
+    im_width, im_height = image.size
+
+    for dt in nms_out:
+        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+        if score < confs_threshold:
+            continue
+        bbox = dt[2:]
+        xmin, ymin, xmax, ymax = bbox
+        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
+                                      ymin * im_height, ymax * im_height)
+        draw.line(
+            [(left, top), (left, bottom), (right, bottom), (right, top),
+             (left, top)],
+            width=4,
+            fill='red')
+    image_name = image_path.split('/')[-1]
+    print("image with bbox drawed saved as {}".format(image_name))
+    image.save(image_name)
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    print_arguments(args)
+
+    data_args = reader.Settings(
+        dataset=args.dataset,
+        ap_version = '',
+        toy=0,
+        data_dir='',
+        label_file='',
+        apply_distort=False,
+        apply_expand=False,
+        resize_h=args.resize_h,
+        resize_w=args.resize_w,
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
+    infer(
+        args,
+        data_args=data_args,
+        image_path=args.image_path,
+        model_dir=args.model_dir)
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 502e2413b4..bf276bf2b1 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -273,38 +273,6 @@ def reader():
 
     return reader
 
-
-def draw_bounding_box_on_image(image,
-                               sample_labels,
-                               image_name,
-                               category_names,
-                               color='red',
-                               thickness=4,
-                               with_text=True,
-                               normalized=True):
-    image = Image.fromarray(image)
-    draw = ImageDraw.Draw(image)
-    im_width, im_height = image.size
-    if not normalized:
-        im_width, im_height = 1, 1
-    for item in sample_labels:
-        label = item[0]
-        category_name = category_names[int(label)]
-        bbox = item[1:5]
-        xmin, ymin, xmax, ymax = bbox
-        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
-                                      ymin * im_height, ymax * im_height)
-        draw.line(
-            [(left, top), (left, bottom), (right, bottom), (right, top),
-             (left, top)],
-            width=thickness,
-            fill=color)
-        if with_text:
-            if image.mode == 'RGB':
-                draw.text((left, top), category_name, (255, 255, 0))
-    image.save(image_name)
-
-
 def train(settings, file_list, shuffle=True):
     file_list = os.path.join(settings.data_dir, file_list)
     if 'coco' in settings.dataset:
@@ -333,5 +301,24 @@ def test(settings, file_list):
         return _reader_creator(settings, file_list, 'test', False)
 
 
-def infer(settings, file_list):
-    return _reader_creator(settings, file_list, 'infer', False)
+def infer(settings, image_path):
+    def reader():
+        img = Image.open(image_path)
+        if img.mode == 'L':
+            img = img.convert('RGB')
+        im_width, im_height = img.size
+        img = img.resize((settings.resize_w, settings.resize_h),
+                         Image.ANTIALIAS)
+        img = np.array(img)
+        # HWC to CHW
+        if len(img.shape) == 3:
+            img = np.swapaxes(img, 1, 2)
+            img = np.swapaxes(img, 1, 0)
+        # RGB to BGR
+        img = img[[2, 1, 0], :, :]
+        img = img.astype('float32')
+        img -= settings.img_mean
+        img = img * 0.007843
+        return img
+
+    return reader

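For reference, the new `reader.infer` path applies the same preprocessing as training, minus augmentation. A hedged standalone version of those steps (the function name is illustrative; the defaults mirror the resize and mean-value arguments used in train.py):

```python
import numpy as np
from PIL import Image

def preprocess(image_path, resize_w=300, resize_h=300,
               mean_bgr=(127.5, 127.5, 127.5), scale=0.007843):
    img = Image.open(image_path)
    if img.mode == 'L':                       # grey-scale -> 3-channel RGB
        img = img.convert('RGB')
    img = img.resize((resize_w, resize_h), Image.ANTIALIAS)
    img = np.array(img)

    img = np.transpose(img, (2, 0, 1))        # HWC -> CHW
    img = img[[2, 1, 0], :, :]                # RGB -> BGR, as in reader.py

    img = img.astype('float32')
    img -= np.array(mean_bgr)[:, np.newaxis, np.newaxis].astype('float32')
    img *= scale                              # 0.007843 ~= 1 / 127.5
    return img
```
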
From 4efa01ce0a1beeaa1d768c9614af80bd8121a648 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 22:40:12 +0800
Subject: [PATCH 30/40] upload inference & visualization example images

---
 fluid/object_detection/eval.py                  |   8 ++++----
 .../images/COCO_val2014_000000000139.jpg        | Bin 0 -> 42057 bytes
 .../images/COCO_val2014_000000000785.jpg        | Bin 0 -> 38022 bytes
 .../images/COCO_val2014_000000000885.jpg        | Bin 0 -> 29582 bytes
 .../images/COCO_val2014_000000142324.jpg        | Bin 0 -> 67199 bytes
 .../images/COCO_val2014_000000144003.jpg        | Bin 0 -> 39911 bytes
 fluid/object_detection/reader.py                |   1 -
 7 files changed, 4 insertions(+), 5 deletions(-)
 create mode 100644 fluid/object_detection/images/COCO_val2014_000000000139.jpg
 create mode 100644 fluid/object_detection/images/COCO_val2014_000000000785.jpg
 create mode 100644 fluid/object_detection/images/COCO_val2014_000000000885.jpg
 create mode 100644 fluid/object_detection/images/COCO_val2014_000000142324.jpg
 create mode 100644 fluid/object_detection/images/COCO_val2014_000000144003.jpg

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index bd3550c3ff..34f367f959 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -18,14 +18,14 @@
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
 add_arg('data_dir',         str,   '',        "The data root path.")
 add_arg('test_list',        str,   '',        "The testing data lists.")
-add_arg('model_dir',        str,   '',     "The path to save model.")
+add_arg('model_dir',        str,   '',     "The model path.")
 add_arg('nms_threshold',    float, 0.5,    "nms threshold")
 add_arg('ap_version',       str,   'integral',   "integral, 11points, and cocoMAP")
 add_arg('resize_h',         int,   300,    "resize image size")
 add_arg('resize_w',         int,   300,    "resize image size")
-add_arg('mean_value_B',     float, 127.5, "mean value for B channel which will be subtracted")  #123.68
-add_arg('mean_value_G',     float, 127.5, "mean value for G channel which will be subtracted")  #116.78
-add_arg('mean_value_R',     float, 127.5, "mean value for R channel which will be subtracted")  #103.94
+add_arg('mean_value_B',     float, 127.5,  "mean value for B channel which will be subtracted")  #123.68
+add_arg('mean_value_G',     float, 127.5,  "mean value for G channel which will be subtracted")  #116.78
+add_arg('mean_value_R',     float, 127.5,  "mean value for R channel which will be subtracted")  #103.94
 # yapf: enable
 
 
diff --git a/fluid/object_detection/images/COCO_val2014_000000000139.jpg b/fluid/object_detection/images/COCO_val2014_000000000139.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..203a5273c134bb78bcb860c832bde2e2f49a42f2
GIT binary patch
literal 42057
zcmbTdWmFqs7X}!t6bi*#JQOGrq{XE`aF-HX3PlP8EiNqtN+~VH-HHbYT3m`2hvHh?
zT?zyXBy7I#+p~Ml{@UHiNe&Yx@63JQd!PF}_s-nU-LC@3)s@wh0XR4S01oyAxL*J$
z03JSgK=^>*At50l5z#|p5^7SCM~_J8D5=P(ndpJcO!SP5EbRQ8EUdh2jEr1j+`KOZ
zgoTBHoD$OFf>QiK!h-)91c!)-h~yCoEh#Cj;4{W&g8!el`wjrbL!1)aU_2aF04@a%
z9tF;QHvoveP6C|&Zh-%D!@<SFCwM^kkcjvZ_JBHa04@$59xgr}0RcWf_UJ(D{{i?E
z1eDJN<Q`DzS`o6kQwxT~<vwJSuj-_MjQ?X7viA5yMEv9_Egk)H4o)s^kg$lTn7D+b
zf})bLimIBrp1y&hk+F%XjqN)-dk3har<b>nub+QF=;yF6;a|T+#3v*sC8wmOrRU`r
zzzd6t5hc|%wRQCkjZMwJySjUN`}zk4Cnl$+XJ+UA&abU+Y;JAu?C$NKo}D8vF0Za{
zQ2((D2Y~lKv#_84GwlCi7X{WXTzq^ye8T_Og@fyhedAH!6Fd`mKq;q7Xys1DDj4#R
zT0Snfs*{LK2=b4{+GCvf3A^yx^V9#B_Fv2X-x>Dl|CeR|mtp_6UGo4EJRI!e;ZXp<
zfGGg6_jA6F3(^8gTm^7MV`zk~5_JX+l&gTF0KYID0<EQfkU*^~OB5mBia;(VDD}Cg
zf;@<*^Po>F$Yy?M37ZRli050D+zhG{G%xif+|wV7h>9=Q%9V7tD;XpU@C)c8V?{d0
z1jM;61^okuIh4>fzsN14&jQZWKrYDeHx7EVYY-=<%7{cnUDHQ$sAI`c^aW^;)Jr+f
z&XtdHJOT$^PDDj*oylcQqb7x;^jU?ys*^6>=wT>AIM!UjlQy0R55c6YvNuyQ%%<tT
zrov+I;hSy5gCy$yp_mkL&TKss$@FwC%c<F%h@2Ei5q>!E$op>JtL0mx_3L{;-Q;<R
z0qcOfLk0~)lFH&A8hOU}01uEW@h1twTqaT@w!lj=)qE1dTg5EcGgNxv=MXq3^0&lG
z9F`*_-QG-o@Qc4kpJE58NkVs9Fm+OM<f=NdkuZhV)D$|-U7;*<8&jWG(SB^Fmx!Mz
zsyHpyET{lv%={hoI(K1#Rl1NWR7ibQGC_E8Vmqn#L44#mM4$jd?@j$hF<^pH4)L%s
z){(YL*e-!OG1&V#heOU=XyFf4sQCj5O;&heoYLSk;j|ug!&GIi#$LO0sH$*UVd|kk
zi4Y{~f{xLE1H$TPqaP`tD*|~J*Lb)8>$D#+H}xXYATQ2jSkXM1RE_(<fazN~C8%rJ
zG%SPjj{9@Eh3pVh{&X~fL(*sDqSJe*AdEOdV?lqz-*Yz<HElF)%cw-jnd_E1UGO)u
zRL)yCiK&-5P)qS_p{MH)5mMWUuP6(45&!Czs&7lRD(my)*0B|@#s}7e6_$H|(bKUy
z#Veuy9O0;jF?U`34kqa0xUCg;Lt}`JyUs$$M6h(TUI&Y(f{-(^#8ln%%w+F-y7{E1
zTBd#ohr_VaFY1gvf^RjdTFT5(7-YCr_k>+WbAmUH)!%{wj>uD0LS3z4J^=NX6yZd`
zm-ys_Ry>9fEgqe`kj2k%7Ls<aI3%K&^+(?y0oUi+H_R83mN6Un`p=<o0!V_q`}ZY!
zT?_8rj$4C*#s@z&y@;MGoHgC~m)lsV4(e0Q?LCqGFlqNH*kVb`3V!$PDg~_G>JpF;
zpL5rh2~^*W^5!FXr>r}bltN=6TSokHFzca+ZGC`uF`c%WaQdO9aoToVvu@;DdQE$P
zHz!<!t70al0^|DQl@ciCha7{sPrnqEX1HN{IOObv*Z60Qa|~q??TB2LdsZtM%S=q_
z%out2Rka7%l~L85W6pP}%!Ctz(f)X|D3jwTnj|-)A>BH%l||)sX#in+xpaT>u?U@v
ziJi@Y&msZpJbUoxoO%Ps<h_-EE3r}RLe(s|5VS}&fb;6T-1Dz&2x*-?{gQit0RNAA
zk-di{L&b{JwJXg4+p10CYPvUg`41Gv6E}gqwav-J1BDIr(86pr$QX%m;)be{f)a`F
zm+Tl*d60^GTE1`rgaYz%nA$D^z|HMYAn>p-PN#<!;qdS$6r>5EK{%8~6~H?S3UI9>
zOw|?Wp>QbjwIGLjR3dTal(W*SDDszmj#g}^2&R&nsuJaI=7!?M#<%NnaJpWPKthh3
z<Ta`<?<^5JXEb@5(@LS)gQXG@Bkr2<H$uN_fS$Fmm;(db=Cnq07Vr`+I$#DNWS|w@
z*cich6HSexAI;DY@iVZOStQQ&D|6lCRb#&pdGP10k>cBO#Ljf32q(31f?lJCkF~&|
zpkqEAd*H~QzNyTqEDIgSIGcfTVKc^wgb&p;npuf(iHnO;c~(hx^{Um&OZXq9ehM<}
zR2opF1@u<DfGD)@K+oO$AuZD|aU!}!Z{nrVd{T#FF%m&Ts$O*pAwY`oE7r7h7FbhL
zc5^YQiQ^`XNz%A}emje|A+dC_R>xmRtM^$qY{XxDU?<Xul-^?IV}rEBE1n<SH7b;D
zJ~%jZuDPC*oIkImAT`!4`=n%+wTjJkHBGOLK$jInotJvqT{K3wDzAUGnApY}+w0a6
zB^EJfXg>fG@1deFbmmD0eB*oX_QYl>+g|<x9`LQXN>->`6l$#hLU{aQ)4wqPawj2v
z4>&M{K}zSQ^}{`zR@AnQ*zDJ(@S~x4Tb=yCq=5lc#LAaT?hO;RSV8cTNaXGyL%yrQ
zcuLoJ`*JDaFh6SK_ZDw>5n;(M^j_zlvG((vsitC@y|of&4aKyAVKHI5*s@o`iD^0M
z08?%uWe04&$qOdtD#*d3{tOwH?WHahwz&sX>Os7V+zWf6kGEL>2?5a)q$}uvpXu?8
zH&uc_{hRPj#|my@Jk#FVSqS4ZzVZAW)9PfZz8KG1Ol_5h<H?_huF0yh>d=YQagx){
z3!0#n{%kAe?UoAmw2Y}!<q;kkhNy@qnx}UPSu#oaRO|<z(-=2mTY&xNMo&ln*c0Nm
zMCvewA7}w$ewWqKil0RP*cYN>{IEkUF3dhQCdbrHpX0UJ@QbX_eAqF&n~iqmI&d+F
zuADrqCad7gQSJ$sR(bdbiatc@Cb@S1nafh&V0XYqE*f;`+4mB?V2ffV+cFU&CcXA}
z$W5W!D>n*rMV8K#&I@zmUy4FO@RpNSgA9)>i6bKc{U4V?j$IBpk^jxZ+*8(t9WTo{
zupwog6z(AfnLJgiPYQQ$g!j<qB?W7~R^o>e74!;6K&jWeXc)xH{;Dcy4HN7?eJ`0F
zAOD=68@^xApaxuiD^pmaRvh{xBx6@VKksh=RP7Kd6f7Zvx5$FO6Jt8`A#HOn>`v%z
z|B13^ign-OHbSAP#liR8UvGRmVi-ScbVncFVmC4k{>O$U;7n0TnXffe9{;RsVZssn
zwh3?SJqXBkRoZ4$hs*C*3aRp~daZOM8#-V4aPMOq)bo4l&I=aYqiZC_fH3L6G-VtH
zuMc<&1C_B=d>vKjs<;REzkn3Q3+Y5l!J<qAZXqCMl~%@HMa>on0aL5v5GSKg@!;y~
z)UT>LmBJ;G5s*eC9FMx2L%bKHdV_)l(Xeu?Dsgo9F@2Z{c9-XWN;j;IIC%Y+_pm5Y
z4sX!^p-_z8T#)y68{6|b0uqnq*8>;qnX|c$Drj7t|6rkoZ^hkwBOL}Gh(L5y2JMuK
z+_tAn>*&bVzpRjCDWzl`%wWfvysZ!;>!0!oebO%-rs<dK>^X4zr>FT}+cz(@1g#L`
zdjPd;H|%uLkZ(4a7sxSxypOlLU<<4uJ9raUNDqg&|NVOYAsfe0cXMHJengtV%l-mC
zs4gAF`Biu_spi*fa0J;h*_~JRmPO@q0-i*vaoXX8O<lcI37BeMbhsMEJ?Cv=Gq+L*
zPIBm4zrF`-t&b>OMRk$&ueaiEi?fHdO|beUzH3<}4mtT1(c|{W!Kf;YAW>Grnt5pu
zSI|AGh_+x$*^-)X*A$`a{3rFJTA|iG;DwCE2lX(RMRvJEr#JP+OAW^SX<D(f5+fkl
zVFt%MYMW|zznW|pW;)RL?{N{)lAAjeT*Su^lyeV|Op*WGMrSrv_cdGJTkfAbgg2=x
z14skS|5~X|c1$6N`FsxmIr^3A)(55t$ank1k!WFH#NnS3uIb<1`dt{lTE!rnDwN4+
zJKT5XwSdhp-O)UdmF{%?Iowk=nfOoHqY=OT9IKYu^g{n;m=zBzP>+w~=6WRs>W0EV
zLHoaZDD6_bRnlKW0e9o+c2G~*$HRQ0kz=^q5$dLZ273qo8O{FJxq!F!62p>O{A)5B
z?4ff<;L7W%bo1|w9-+O)FwnROP4i2U74;~INu$LrgrR#zeJ_6OFd3@<hjjnOd2#2k
zSI3l~soBub@6V`^(3^;~xA%agDC-aTxUms4fviH93KfOjAS(mgNq)WKuvl2N|6P<7
zue2&sB+5Ys&ZuXCvk!u@`9-rUD`3WY1gobV98(d#p5*iFyzNV{R6V=)CaaJt!dsqu
zz-&R!GhI_q{QUY>*opN~B*qp%b&lwy@sdosxTN`{N67X`lEToYz3DsPXIvno{l$T)
zeBR$Q{ajzaWVn&%B;L<L=bgzcbcyN(gZ~3|<;Z)$io%^E_euFw(2=PEvf~~A4%5?J
z5)z?Xp5Zln2Gzd@Jlw7oGS+pJ3#gcHr#Tb9omvwwCqk$@d0F&AMW>u38v^{^JDC-}
zvkP^h6+RqS?|4pK`+2N`n^d^%)s&sCNfT?lZNVgMjOn+63lXY8z<^Xh7LOR~l$xog
zS8?Q*l7SDjtCW4{IBcd)!t4i2>xeu)oEOdzCdq$LI`Dzs+bUabpoEt_;iO6U(@Sm>
z`VuwQ0o!(;HFC7^w2%Mh?T9RrQNNRGL3{gql5@mwikVDSygFkdjrwc>^TW!mTivbc
zNIlub_YZV_4nKV=K$fCB3O9Lj*6{D>9^g0i-2wb<bX<wC-+Ygnc&ylbO^$QUu6Abm
z`EGD_kmKu?50D+I(zy{c3A18i>0uqFPxk<-1mt=ph-!=q^3|*nH$}+Kqhv(Sq`~+k
zIP$H(Q`U{tjGyMrax3$~=E-gD-SE33WJNL9oB5snNxcy_YSQQ2kj}4I2Y<TooqK^h
zMug#oW9I6liQiRW>LA<?{;01`RRuPfmaRE$UizrMAW9&sc6!m#8#lqWYPXJy5yDcm
zAwu%SOEUY;ui@8=^GfOIpumqWS`==Ka`xL#w+TyQ4lH1Pc-J{E(4vtrkRRW|nSCWV
zBp|EGRe%x=Qnxi%=H7t|*Gia8LBUBcCx?}n98Fa1^^}-)i|K~1N93EmGj06I+~X-e
zCDf@g1)K#Kw#5?f&amvr$a^15O3jc(v#K2(vU>AM4KU#R&Tb}jA2r>ry`b2u|M~#8
zleX^;-*5uM7-VC##XA-NdF}L!q+<li9>w{lI<LPiIyd}4T}Yk#S?U$@({Wc1OYJaP
zEGU{_J#nMJkZ&}^ISW!hYeXK^)MYI<%iMQS(T%M5px2f)+au{E(BGzC<&rEox0J#{
zE-+>I^M}SfFSVXe)$q`2p!$OqCr_7WOpi@DFe*1wK}O%b)~?)`2y>|_FDexaI*KmR
zz)_rMPeEo5{_B2SuNz!aeEtbN&P~d5JHIweWEQG%_P?y@Ic{`ElX{;iCUE@KwY5}X
z2tP0}Vx}9f5H!>I_8<8E@0B(pTtByQnT^4@%hN2#$f08RE^Xh8FH^W+mZ#;y-d>NR
zD~j^D@qyh&(%!2d*oIrD|FB<Lvjd9>FG`q+>`hMcNj~P3EU0MxWdp|+Bf^XNceGkx
zrO?N_aI@7=*LFbvR`4)9?&0U6&Wo)ut8IujHpzGv`d+SA91%3ch1E?0Zgx2Bs0;qg
zrh4#6_9kdHmQ44?$Xn8-^A)4P^Ao}FG|#67qqpT~vg@huu!e8~om&MU+OgKkYJ5bR
zOoTyxX+saWUVF8!HA%xi-h?H0Q>7p=Wg4||E6(R8470!~y`W(h?1@AJW56Q_gV-ir
zc&xHyjCv_wRBdKzB9?49Ryh?n=U5%O-|d=h0}pWilO#=D-IL=L)HU>b2MQ{U3)%km
z!gMh-+D_bS1Zg<0CEiKk)=q3{d@5o=vb8>ioo;|{zf}Jqb`g!TGKCiWQFrA=;+S*3
z>vbvM*6XA5)i-|^{Q!CbmHJq#Nmxt=gbQaF$?vGmZ|U#`=t-3^hKBBgMsV_7p7Nwd
z*`>>bgH5ZL^SZt~$m7xCKGIT)pL*9U%r-@R51`R(D29CpD;Rrly6bPn8%OJ7ybEmS
z+REviN6OxC??OpduPOvd4YyXDKFk!nkjioO!5eco;CDFYwqo?Pn@V|@?H`OvFMqzf
z8!?PB@cxReV#pacQ6hRspgTdISuPnd-oRwzt<<Uuj-mzSc_U#hzORaYM;i6;1+~b`
z$2vLNNnNQiE)0$x^=a+FF+S@BPZR@p?x^(|GiN=rxbQduwkYB@zC6pdMcBOX9l`2b
zGPxd}@0oclaqZ#26L*~_?aQ#+O@rxg3wgjTohKDRr@E*~bsg(BmV&Vjk+8?G<?ZZX
zLPj$6X1>?n?3g6h-tSkh$$ppqZ7Xyw;o{Z22Y9QYh+|FrRbHuV7<p=Ol>W^N%x8I0
zEOSnUTrV)rxA-b_GAy1h@35t6KKy+qcG5FKYpVOLUoZ^b4cp7U5Qz<Jo|$R=^Dq07
z?7S@K`ovVlCcd(}m!e}m$9B6cE|@@B$<&s@g+<?%($PjYZdAQMxZn5#y@P3D<QSo8
zX_7lN#XVpjkw`B$TO`7=>IwPF7o(>QRCkcx9aiJrt#iA+ki)DXMLOczdj`RKdn#KI
zC*E!s6l9k%XBEV=S6M=G_W&Jx<mk1*nrOnAwEste3u$X`BX@6VCRY)a!Pw^hvpwB9
zDP*C*HastOUfooCT9mHO@1In-j_M5OD-LV+zFWM=xHryhwiGY&xX=V1LBa$Thbh+|
zQ>1!YJh~PsCYtZwZcIi@!0=yReyyF^(=KpL`tez)m5&9EP<4h@nCU-jaVab`1I1-N
z04{s>gSDrp(l;w9-d%?}bmt0mr!stXYu+h*88DNWTsa=)TILNnRhQHg=#la&Q>LER
z>oZjcMA-edpG;r7qq(X~y`qSjF0E~GsXqfM+Mwt+K2n3nPZ-d}eY;KucXW5*z^IQS
zwe)@>4-rp`D|ndL*Rwz(2M`=x8&N0d+>@59GRJ??ptfuh7rTt}0av-JQjTfZCu`}i
z!yl*-!+KFE3Zt0`8BZfLN%S<I@?4oP2D)qVNd{gsFz2Vc)U_CY^O>Y)cP-)0+afbZ
zT+x8~BgL)M|HV4WzwNj&34=66hJNY1!lNmjGWgI<AheuNSe`S<W!(3bimgt|4?I=~
zJf!!|TVDb%QCr*t`ZC*(vGJc=PPhy4&HnSd2cUO35mxyU$uG0;JvAmj)QpFP^9x<s
zS2i+9ZaVyPqo^jJhavNDKiVlkXiG)JNz~XcF9U7su)OJcGwnOc;?_74e$WWZ?#=i%
z`O|8@2)LonAAvrMxeC9asd=B$9XT2QH&(4sv;BR3j19IBrN$Ef;v%jZtpfbv1RrzF
zybVo{)h@~>gDb$b4i8PsMVckH%_FCw>m=b3d$ymJEiBjvj>W-nNaLgp#j8Fx(wur)
z`=VC6JvC=H5=A$}py|T)3GM18rf#f{+yFpaYVY<vltaec8*DnI6csHN(_vSG;Hh!E
z;P01*gzYe)d6f9_R%6Zf9e;;Kek>`LZY;?6Oqvp$nXunfmc4KV8B7k`p5Ywki?|%;
zYF8%gK5(o}4+eU$2F|Vj$O{`5t782t`$HcK#ST*$2Fla~%w06H`;^Qjgtv9<zM4ag
z%tFx)W7<OdCS1c~uUR+|czc>pEox82MI6aFm*u~bdk>(}-#={kmPHX;B|`i|>rA)I
zs7e<T(>13^`IIMLZ;DV32xo+s>kDot1UdSL51R3EixHR5PKMgFK4tpc+-7FeFc`1A
z@as{qz=eoKmto9n%G7nXp|?ekX+4i+5>qa>&z8nrp9d#_Or<^!b%H!3ZOI0|KcaPJ
zi&ct02WRla;`qpP0AzxL3QL!^&Uuy^(tLwH=%%|W(G|1z&4Op;Fnbv9cx5L`GdhQd
zNVh!`@dGT1L|69qW;u#7>W3FPa=aBvh$)A#SXm&$pgqCnS)quM_<h4TytZwDLk1D3
z*v}!_&Fx?TlbFgm&Lu(cEdnEAXh~H8^L7lXxJDB=6i)@>{#jvsJF)3fq3uS#qIRX}
z9&@P&MwCDtpL)7K?=A>%?Z7ToSjJLJ(vQ+51=<H{x+=F?5fvg%VvWX-cfK|uKW*&~
z%(Xo{V5aQ_!0WUwtc(IRe2KJPuy(JOHZpl1r>JAycVm=2N^q1_dO7rVWZ5dtnE#M!
zA54GZz2NjM_uHEM8qs2jp#KTVh<7vhW;*d6P+MTcL2q8FFdrp8^~q??D@bC7@4B(3
zA@=bWv(Vi=0RAR?9mLEzY5t){9Gr_=qnLwqn@QheKwpoymGI=Jlli5!a6*|FEu<&d
z?*YdAdG~+-z7-Uah{p?>-2_JdeBzzjG@We%cTyXN#Z+?GufDTl290{KSAV<m=y`r}
z#_?A_)9+5qKM2s}qc0b`u({n@7M7&*EBa#7Ex#|bpa^`#BD}jg<r0IEzWatbtf*~G
zw3v7{UfedYn|>VixRlZ3W|aDJHOX6wVhTaW=x(~X_3diU!d%3SQZY>CFMEmdL+hBk
zUdIW)A&YRdcZ*;^!lM`ah2hFtCc*=b8eXF+1vWkJ-9O5EuWx8EW6bB!wAdMb8dLk1
z@PwuL>ph^6G3X0?yttXXAi5ZGS;p03mZyOgpF?!WI67Er`9p8BN7)x*^=J0ggGHzr
zS4y+I3NvJdR%FyEHQCD>C(1jxj`;Vg-dD2*jTCYHC_`sSTc>h!M>{Cp$`-{AapUWz
zE2*H`)rt0;nXP?abyuhp4>chcmU~3M<LKzQq<aqq$UVa%nU+TuDQ#7=Om*>H3by3*
zMznsx>%$DP0fxq(l~}`R(9A@Qz?5i;?iy1{K)`NGp#nX~3te5}q=1NZdj(F@B**87
z<br{Wdf@36aN}-&*r@s*5F_4}JvAMH0$Fy&<VmI7X$nOnR}WuJLV4s>(!>I{3{eqQ
z(M!$UgtslaYLNA;T}Q2lW6BS77lG=ZJ|4Z}YkV9|izrwZiritliHNpJv^hfa5KTt>
zrB`ZbBB(@t1|CLej9w#C_RvZ)f9z;b`d6piH7R(EFw<UL^kk6ox7+@m;C$+$2m*gX
zf9qjSU*If?e(vn-6@CAIuCDPO0NiwP2{7_`!ri&JRfb-tB1Tal3x(;9$)*g_U-H;1
z^A4q@otbcN_Kg;ooc$h0%pa=K0nW`B99tJZ<gzmYO$WW4E57U51y(`X3^|Jt+rZrk
z?4qs=epfwp1K#Grf565!?*U%eXp#sVb6b|!GN){flJ-Tc)m$KmJ+&X16Ml#*tQz2J
zagINHE+^&wy7fn|hf3P<W1Ry?_dC@DJf2!F5!NM}FSMxBHz4ZGy0MMz_yD?*vTzdO
zY-QR)3Nv{x?=F%Uc|j0oeSJdSkxa3>+8S1iCzkpuOX%vE6`#e-lnoSR-`I1>{1MPJ
z&LY?CAe30$u1<W;H0}y5r(@uIxfw9EsrL{;X)9wx79fT3U(bT%bN9T8k^5~v-a`Mm
zP+DaoBbga_{OZXgVkfnT;GwzkZ9Gk0jqP-??c7Bn<u!dC%9=HXUFg_Q8`t7#6HV1#
zoWWm=GIMYIQs3X)dq9QLJs>ASlAxB~Eg#k@*OuaqTcv0C>NBVG0?N<;x^Lb?hfM+4
zN%|x0xKY(<sTL*8^L@(OZ0&=T{8|!3*?<_(K}hqrUBUc8!NaLzIq=zZX#)}U<}rn$
zpbl@J;NS`7oJw~)LOW^qxphxoa6#6W6t~Iq9Q<J#VV{ZqAl0SzC5Yo<fXD%|{=Kx>
zqq}|c^@g|h5x?N42)R+uyQnuYT@5Z*>C~utSZry15VO;V)a?(!KACHV)ju3RmYb)E
zI5Fo1=tH=I+RHVepN&2RaKWEPl3lHSYbz!-2xRVQp3eav#)IR^O`)$H6APM?3&+2p
z2r>dE1p|TLKL)=4_F+S>ihw69;31l^q{5^gNU6wF9puf3wGNs2i^VJs)F*InsaoDj
zrRsS5QY_5$;A<g;Vd)>ZZl=@TRaH#-gk`7aT}cgFUl&`^?Vysck_ucnGP;O4NrN1f
z*P8qL32Cn!>Rh+3mh37Qt1CO)-%QSTajtw3Mdg4eO4|~w(JnTG=*-G9k-YAL$GRW#
zzdAj#nGm(SGL8sC7MtZaqJo|310M{u?IwYjK4OblWf-QIYkphc2^V<HRE7Fi?3&tf
zv=VOx%l0x>!yG1CZaV()MWNeKt;)*Gk!$V9XJ(aadwq+I-`bZUSX<h@ef&Wwb`S88
zLpx%s=v*h+=JfV%5;so4U!UFXG3GC0VW&F1HtW@|7>tZCHb{c8!u~;Spy-XvMOl0B
z=vm1~d|y`|YoLBa>12u`XZR7D>kDS%2Fm$G6(r~$FjJ|tj%K!K;KRC3VcbtlQvW*V
z?GQtttnlV!&X0O{+cr`&W=rjNiru$Ms@?HPC&|{Zk#7O@SFx(SlmH_-iOUihD$DCc
zfr}safcR;y7Oon`{-cLT(#Bb>BIv)pneT1@m*98}#&{iw6LC!;;!J7fb(lo2Z=97O
zTyB_!`eL8wTY9~E9cW4elCvF`A^Aw>8u=YPpPW}uTlXD3u4go^ux9b~?D?qxS6*_N
zJhr0@NO*yg`H*b64Z=<gWCcqe)ssEHTVYW5fL;0A1dR0PZTm!3T7l}}Jz)C9M42V5
zFR@+cz~IPF31#s89&ml0@DJl1+5Q<#mv<fQ-8_M#Ee$2`EAqD+!pJ0?VhpN_A_jgc
zJHa1Cu3n%F(=<Mh9-m2<wv&24T0jh$t+N*yxYZJXB?TvE{1)=b`XY7>*cJ!>B<E+N
z)rvNm!@=s#SiMr5^7~=ABX~R?3tW0dz{Ujh<Q;-f2PF(UCgulkEbCm6ogrX^{gt)0
z)``Rf;zsd4^kdzH%R>FH2-)`3$Lt%yx)52gJMcF2MnAPu7xFzqe8HEiwKA|E5Y(h>
zBKufXTFJ?}M2q}ZJ?mBnS+=D-pNqhK3~KE|jsC6(TSqLBEn{8Skw<xqk-6YOI<z-k
z`+<&C`j!Z->WHAwo~n{N^Rwv}kP8T%ZI}m<!)a!gJl5xHMw>($)&lLcOgkqwLw#Kr
zY*}pHOEWOV-~-*w)ITRmph$NQo#Qm$T_9A83tXBm%hVM$>!>)3sK0DJuI1fb>M?}>
z%`JF&g@U-s)bZ<2jmbV|&u%8$(Ow&N89ZQ+6af3;2Wp9O{iV>m&A~x3@n|b<0Nm$G
zYb~Y*AkT;cmDH&N(q#+0iy~zIYCvd1UV>v#h9)S(diZcNtR2n6714O;e;pGU)%2k8
zV3-570r`>4OgBKHiu;SL=j9Btq}iob@tsZL_b#CZ)Zcu|96k(<2bk2X5;Xw#JPIox
znfmRd^0gQhla^>R?##B+|2Xi?w^B71x5xPJm9rIbv$WWlFESFd4n>#+l$BJn1KzVS
zIVxH8{1l3Kv#5ttv{58XIWqsB>2&&fp;(yi{Vc~uOE|*4Fx`e$>19OhBq_nO&k(&R
z;Yp_M9$I)_$X6vXY7|r$Fu`4*H~burl}A->FX3kDOQf#*V%bd95T;t#?JAKz`IT|L
z7J|MQsT+|<{62ReG=cR@wkWKlXd^;EeZ6r}FWi({36~LMyAPs(^!n#g@I`;Y`R*5f
ziwiH57a*}g$K4?=knVq(6&59Rm7WU_P}k1BV)HqsP6zNm=9@)dV7y7PPmf_shzh?D
z!oXF1%|{*Agj=*6)oEqQW`Wa)i^sg1ci<lifL@S`3Pvh>AN<1xyxK0mamtn@Ol)|d
z%G3R7DBTYmZWgbATV^<4ceQNyHqhKb;WvhLAH>O4@BVgvz3%gmillsaMOP>&k0qM;
zGOO0f5r^(NUe*~31ZC_3Isy;rI-I9GdIw)J{_K+nO}#zxJbK;0ZGN6lP+e7EyS%00
zcJWY}TCa%k@C?=rOM0IAhZ|WM-(Bu_=lK&`Zc`Tv@BcNg0*-hOXG_<$keGAs#N>gm
z2vk9jd@VTy8tolU^rDjv<%+1?rGMvSU}W^rU1-q9DJm>K8^jI>mvDXvbIkX~G{Sxv
zeOoVHf|Uc0XJt57`59vM-uUv6y;hP;5c%!skEtRG@8)Y&ugMd3R`xs#_S}mqO3&`1
zKxcIOlC$;emVXz;6nwFO!J*gTijA#t?DYoLMTJLA5IK;})cc^e+7r^<guNAqrWP)K
z4jg$l*N6F)G6f;X9-rnoFW;G&Dv!BkLe>&<88PCT<t7+~<0SNmzW3|0-C^=5ZC}w$
z6L;r`p_1I2WX=@!5@gA;m0o9s_r<TiT^pS|Sk#k-20cg7;`1uyG`ttGa#nk9N<uaC
z;3jNE(MPkbPNt5rUuhnNCA6*Gm9i>^z3Iex-~G-ESR`U;{nQ}Zy(t>t&Bl2!QMIM{
zC6{jm?yA6e0s$Gs3wn$Q?3w@3GhYB^iSl%AWRXqI*SS<s`gP@QG$M<k-GlmN6=}K8
zK7S&wl0dHKxFtE5293Uat8bYWSC!!_`p;(mFN8sSqp@$MIBwXo=`-nQLL*aweT+p`
zom!7WG_OvtY}ZV>BZNB=6sb#52rO6qozIhr<tzmdy3DHrj_j>$g&4T<VO&g>qGc(L
z*5mbfhX<u~7x94yjdGXoVM&Vfy_xwyhb7Fu04B+i@&n5Yk^~*bo%BH&mMYz|vJ<2%
zIIY;Sy9!275RTr#yMx}+$K;CIBj^?6gcoNs1M(*5<)?m60oI`flaD6pcB>4TwS?0n
z2Q&rAOnvATza)qN)6`mYt>zMh^~A=TL+1Pogfqf8_5pfos#+Wlw6zdBY!fKry_6oe
zrz)XcitwL_?ckq>=NeiMl0zQT@Y<(;)&Dm0iQVfoW{LAQKj!z32&LQ77`cfbmdgIT
z*e+{watM)TLSi2R?}FyJ00wD;F2l&C12ifx1a2F#&E{g@*X;~HcWSojd%&fhLVF6G
z>vh@Yg|HckB1l;r-tIgnaKRcc$`|RWoFh6%aa<Q=xu!AJc&FNx^VmTeYS<z)#*d9k
zcF%Dw#~F)E@Pvv^vy)+K8R5Sm=kqEcZE9W$wGs3iC}s%=*C%|rGi6YPDMo{e?*Y2k
zh6bdpMm5-^+OFPw4>-l<&tu@G<Byzfb?K7B?9+Yhe)g7$!u31w*ABLH0&Z%f&%X5k
z3hb6y#y|Ox#JX#r*<hY}p&i08(i_;c2Lr|8>d+uTiHrv0I})!V1=NEnSYJvD3K$gH
z?DRDPWZ6Qq)yEUQrpy<clP%|7m6hnJIheoc8KvoUYOW~6JaZzIu1|c8yQaMn)qN(u
zib-1R4QY;^{%vveeVsk+EwxR;J;1y)gXT$2tB&WhC^5sb9U6XPwSwvC>1J}O_1xKe
z09218i{p3&1!~}&KD6nF*54+I-|c-dh*QXaU`U707;0`F<?0P{l4%TpU6b8WQ4g$J
zp1^jSrA8_L!zY?&1g-O(ezd9r^)wP;^DD^pYz*E#0DmAJM&8GudKC6rgHfe}CanC;
zfG^aEy@;P_s-bd8WXfXEZp8Z{Dtyn}u6VAx0f-#O(xaJc+i88T=~5EqH;|el+Qva{
zk5V^}=#vx?>Cf4xg7q>9oS4s`^fse%^NI4ErY2H+GB)M@p5V0y{+tVd?2%RpKBiHe
zKNmEU8}`<^671+P*bhpR0$l+{t8f6$Vt}D&9ISlQ7k801edbNK_SZ6o$l!-EEnXE7
zJIo4Xmsm+EPrWg?mb)#+@1iMfy7)ue17}1<8nesf7!}%|o;-1=k66uyyX^xQ27>Z$
zh`S<f49o-Q&-cLrmO^cjl??t2{T`s{;y*>`NdqdlSuM6~2U`*ed$G=H|E~tma{BE=
z-jbl|vh!yySu(kV%=GQ&r_SKBkr4$9G8h)+;CG9WE{{P=fib~6VeGT{3S`>u+d=js
z#zlPEVRzWRe6l;)Rc7`ssOCBy-L}6_G){!EtAf!BoV<BNrsgHI`K<tTTyPIKJ8wXD
z>!Lxw_I*dMDmlUbq+p;@CDS5;YNpuBWHqu;wkqtx4-~%j?zk)Np)IA=RVSd$!HqB#
zkY$cz%zB>A9`{(c;~n_dCVehPYjK~D0!O}cHFp<x{Ik$EuIaFKvv*j(%E!pJ<eNON
zpvYjz_&^m&b)ISeuab;Z&cH=9P=-_dQpjDOdw2f&gV@fd_xCbKx51SKu?b`7Jz$=`
zi|qOaOQkyUDqp?w><1=s_P@fpHnP0*f~BRx-0lHW(%?IMq?LEMm>M=t+VbIo2lg1#
zXDHf(5rw$%ev|<9sk<ODOYR<k2j0-QeRbPqF5AA#oQ=T^c#L+#dbMo9i}HZ%0iI7!
zQ+Ot}U&EG*DiA^KKF3z~09-ga`^7f5J3e@q8TcQMZaf!ppTfE_b>17B+Z7anyHkau
zQ>2C3b$Z%IwGz&|7W;jWg{90EKTFX)KnANPg6{#HlBXOs=o2(<qD;d9R&^S2wed_%
zosDOg=<jXtT-=Uztb74k;8r60(5)Jnk6rfwoO{40uU{AKmT26Bp#T2HkobCj<GO4E
ztSkb$UbvmHOqi0QaqWS*Lhb?C`uBi;R8BtsJrLqHgN7lA|5(y509&D4sF&E|2RqQa
zMV4J6FNjP6lOZE}_kht7*fr6I|NTO@-R$BXFb1l={-q&p8yAGo<NSt9z6YfIlqrn}
z4=-sO0$%<B?@WX52r)z)NB^C%;+^K{qkJ-<Bgp&5<mcB0pJ!|P_5z=-L(^-=M~hV0
zAw0RUR*9R;r1o6uukvf~V!p6Z5kvB|hmkRrX_ffPUK9h7Zm;K%{Rp{|WP3C*8EU(I
zu#6iaB{{B(;3`jmWjv?OIoEuU$tA%403MU@8gE#O;s(LjpWLU<4dr&)BV*y};iCp5
zPGah$LxG#%a+$Owq4)kollw5#6kwsf>FI6XJwQ%`W|mw;!}lJ*8-!gC?C#E!Bjc&R
z>56>K<*)Rx1s5UAzk+%U{Tn@uG<a3My7Z1H;3-)YtsmS}L7mjk-XEq7+#?+0#v~z3
z(jA67!H3z~JVrD}tKd(qCOg(L0=3pZA~cPxzH#1Ffz=WB0Df%iz6uTcyH15H3IDE}
z)n<t-+joXi^5~xNnPb(I?f)t-2<L}NqwxRpi}%=!JqYd!%FfkUf2m{YrtW{B#g_gr
zZqc~|(D0o}lx$0nbGgLQDVo7+(ZfKWTw;d_<Z!|&2--~MeHxMp3_z*Y4aV(=|5~hh
zfvVC!mN^S_oGK5K363a#FSDhj74ePt4UG8w=xxZxulNrf?gzK&NUnew_<Q=S(Q9K!
zIPe(&nERP{yXPath4f+gdN*vgDd>x6bCzL$$N=B$EMdZR+zoNv+Rzt==fRmJfT5!S
z{2(W-nWMt^HEX4z-Zm>A7J#?}JCZSGGy;)t`5ZpMRQiQ4bFGh)E%aDtN39oG=HA3{
z<e8WBDKZ{RzkSiF*vD#yZ$vhP`>SzO)A0Lsy<U^ecUSQ>Yv3SgB>kqs?-8qg<w@$!
zztx~5a(Msog=iRe`?J%L_`Sa|evt4j4-K-BrX%w>H#f{J?3rn#s69^glJ@02fL`%t
zOxb`*<1u^G8)a3dhohTN<*~AC#G!Izdv&Q2Oc=)DiSv`ktSiOva`teRQt*p%6pan9
zhF~lilR$QV`XRs6BiFxYGvjUF**Wh4Om;xHXL@qzlbAq!SB?mIsXu*OsWc<vYY!!u
z4oc@I5l<3iY%Id=0VG4-ZWVhoJnyT!`3m^eZKRD?<Fax;TvqI_ip2*6Zmp7ChP`j*
zkMk!$7v{U3>rg5$nB@2R^1NjZR0U#6b_=E|kMb=WOlYTmO6+>ZzJKLY^e3MQ#>S})
z$^Lbdw_NbBHRT^RN|kIBVe1ar$8}o1<vs51Y<ZH1UOfJdNv5_W)8XYnHREUVI6L^>
zs1S1l!)kj2u0ft<6$PMrjUx<(g%uGKLK?eSG*)$Z&5FEOe*wW}n#fb^xQg}I3hNi_
z_&lK^D}L9oT-iW=*rkFwQKikJJ8;I`NI=H72z7H}@rro-yyVC+93>_w3<TiXpbip#
z4Ird;SE9x_O!x0i!y>l1-VO1Ymey5^)F+>Me{3Zr2F)|$94%aGvxkdCPLYjS&=Xk&
z*F`bL$KmWYNVL=7&`kBK8nX4FJILK%{VXjp;qs{oqmuUz9P;8*AE~8t<;bNAsVM>Y
z19t}s$7LTcDQ{@y1v53L_)erL0mSR*3dc1GL(`9(W9}e63g{Z_?O{onx_RU5YxrT`
zt>uO}UHHEg#PNG9;|e1VYTx#%4@W=#AL8T=>(;p;ja6<R-IXl5WX{Ifn9HLN(U#jo
zcyMw3x2M`PoUar=6^5taO-;Xbbg*>(3Y!Oxgf&r`G6m8Yxc-GN{PfU~K;Yj4M8!yM
ziBW{Y{!)<{EX-Jjqbt&(@+>7}k#S1z;i6lb5#H@IV;F?-$T-i!7d#ZQcpK<MpO9hs
zYw=@PUEHHDhWO&WDp=llV1EYJ4Nt%;6u2=I_Qk0mbpj1A|AVK@of|@sc@*4>-KXg7
z4jSQ8waP$~&`|B3q2vd~Po<)U?*Y+qn4rX=mFp`<YYplnI>W_|Dw|J&s(o}vI;~i!
z^Sq#eq-UCGZ<xi@1AQ{Ew+Zy+U~j#FnFh8y+Lhh|m`lJ5m?WC^U^H=fZ=^Z^j+>R&
zBGl4eNVXzYv9gbay|hADh1(UZxLlSjx6%{uj^<nnOY0b@XsO5K_SE=gd&$lUdsS#=
zJsGHeq9+4EsqY*j^7DmlY=S*&H*yj2YaD6@0mafErbtTSo*Li<Z2gmrmrU;&R+k!|
zrhgVITrQkv!#wdf#xCgifoImusE)3li(l;()J~9sf_Fh+Va!0Hz_>g~GsM;w6#8e*
zr2Zey!IVKmHky&gdGVC$GkZwL`CAmOGRVL3!HRX(wh5X$y!^rIE|w?Kzt2&~B0(49
zfqve{a<qL17L3;OZdG%3`;T3ZS!=&+48*O(mw$POmnE<dD>tiMviEn6Q>P^FH1>a&
z7L(KG_8Spc$aTU`%X$+{Tw16tvFpF;K4q{(zwDH8(Ug||2ruYJ{_MR#Y@no31O9Qe
zH&<h;)_PIO;@k$FlpZJJi<yF(m{cMLxz_>*A=%s94R~8R<xueF9#<LW>7Du7FF@wS
ze!z3`pZ)#o^hmB>MmUI^gmw0QWrepCN1G8{RAn~%F}}~B8ssZ?w<){!2Ps-oz7Q!Z
zI??X;eu840XXa?0*kt*rM)%M+Z;(&L*4MfCF1uXZIXr@(?8mUj*_h_d3jNK;o3n}%
zZiCIt3Xle&ea8jKr6e27Pa7YdW#;ycH({+|h%lH}T3^nv|JIMG<OVffn7<iotCs89
z8)2QAE9a0lxpUi%$RL@zQB%<%6-_5~U0t)(=BP++Cmwu2i2yLk)?}e{Zirvzo%`KP
z^Q5O6e_ZihS<Iz<UwV%1D)OpClJM?ZdeQChU3_B!8u_6yAU@i}5KBi0t_r<uJ)9y>
zQ+12yg>e3`!!~+<DNALSyIY8_pLO2@CI!K#8_IOcP@psu-IJJ0u7A}m50r^3m{RaB
z`&oZ!e9)IPO4-rkwG)KfC;TBil{&pRGS^OxVEeZD?cF!L!SSEBx4K`M7sD!kVOtr(
zYrJxbkVO~c7+l&;u1-IusaUdd-^ru}Wf3;J0$ncd-Ij?<>Kvme&x*~`i851csC1x!
zTL$6kRoJ!x@DUO`SX+RNomS1Ae>HKZCwHOiSp3XJ$CZu_2av@O-f6_5%quaUraLGs
zW~i=VqMlPrBzJq<TC^1jFR69qasawmRKrb<JXHcv>-JFN99;I%^Kwqi0q;CoJRrUe
zQ?oyn7e2~_PfUWZxBSk}unueY=CVyC$m$LnFm!Iq5kdMqARJJuo34&eoml%Nm{=V<
zUWCwphWqdPC1rAa1RwybDGtlpH{a9%cUs*`(N5J^l@UvjdI_9q6e3$=K6!i(Xz!Z6
z8y>Q|2dI2Ucixq3a)#>jjUKc9S8+BG-RR7;)Ypo2!=40Hp1<qxqh6h+-Q|`mi?-IB
zimm5}nZ%w#GF0k6e@7y3I?>SbaXtsH5Wb;fK?L~3y1-=JGFdE=41!HL2h%#W$$MO2
z<zaD+XBbwP=|y0k)Gxt*#&chI%er!xCgS}lw>iJaGE3kX((RKP8hQBBp(?Q!z$wXz
z=EZ?aJ&r{T&RXfdO*j#wDw7dBTohUK@<fY$?b@w8PiFS%v~T^V2RK=52sw-DiTZAa
zIior50pA2&-gSqP_T`T5D!sjC28%9V%Ht6j&(e-~vakm|bJvp;sQWvESbkiVRcYDV
z_YtyusKm}`_fHM<QjQkeN{-(YGzHb1yp`pPuI;yq;{P*PTCdp8mU9gI!FOQCB6KHT
zQ|oVDZeKN+49^IfW-ft^Pohuubn3u9`O`YbY*;VB(mQXg5J2N*_eRF4a|>yt{~I}1
z(ZpJYEdgLZcUn~WS!Xu{YE#`?^(pblbAU7ThehQ5-B;%CLE|mT?X?^1{;A@79l)8^
zNcQ|g%689WqbTJgPln%m1McsHhS;k;Bs{c<MuEvZ`R4p&*hh4rQM_!%W1y{mz*~=P
z_^*VanzYVN{b6r)M*)w+cOt`L@2nrDJwdrMkl^}qAB+}Nim)dTL>TxmNc~g}N(EPd
zB?poT>wmnk<Mb*fOO`}Ps}(v53ZM1~=Gzb1;uWu+M{Gd6_AF-F>Mum}EkSA4=V;Ib
z-Rj$uCVz{d);+JS)&y;TlKjD!KPj@5Jzy0&dludwp8ab3{gMqw@8%vL=v1r&2ydVw
z)dLw}>95NED;SNYqgv4ru=^68WW_C?Sw$Od#13~gL<TwR>MR{gN4>QDc^Br%w_4tQ
zPJR#Q2cwgO{e(C%^0fDWdhCvW%DE^i`3;ujgKyY`a;KALO<JP{Q}3=I6?1)m{Arq?
z9&wKDZ{#hVL-eIoK#NznP22yDE2<@<ME~VjKaqP%BGh>oFg*k$T7EMUr?G9elMH2f
zOStZCF<Ms~e+8x*@%J%;U5(xqu}x+GL}BeWQ(}alnTyib;QwAE|9PsX3=%XN?Cf(D
z)7$(<9%Fx}dHxHN3cJ*YTjX03uDe#>)nGnh)wUYbqI79!`_8)X4LfN#yEk4xLT?RA
zTyG+5FiZD<ThQ+WxwAVhbTq$bKeNy1rLWiMMR7SKU$@1jWIrIykP45`?C-inB@yRw
z-8=Zok6#Qw=h0tg9&w#5L+%b?-v)U5&P6(|L$3Tjp+ALuiwJHN--k)fYUPRloT`l5
z{6nsMyu;+f+&+3<!L0VDx}_SJHeiQJNHF-Ekhvx5cT++0cDtDSjmfK{d?Utqe5?NT
z=EjSKm>R`rN?tF+a589RS{}Cq)%85NX`a<vYn<5sExW---FPsEmFU>hcqRI$^@XMD
z)kZ0v;vd*ckrJ;vM7nC&(-9U?qj^jkYR25WuA&wAx|qruJ4VJww+!6)K9$lOlG^t+
zF|^Xc1T4Jsw#x#QK<|^y^zL+F9`<mZ<fwn{ok!-dXu^|2@IeMl@(^25&|gvClybo|
z_hFhE&dXlLTzyCFerz_4sW-e87EcVJM|cCQm5z(%qDj5dUAooTp49KTk-z$^F<GIr
z?P)$;8&dwrMM^0$>$;qj<K#nkh<jFhTt&Ycf^I50^r>!e#mBkR$aq@4vI|=NRewq)
zk7{RI3lnJ^=A!cV_0^7Xc^Wj|C2gZ_@o-fL8E19#w8(XxZ3JqgIam;u8MJJilXyVW
z-xW-H-#`42IWT3SWX8GJ{%;-`M3bGB9E@%m@~NQYgTRA7|CdLwL_G0y6$@9!VB}dg
z>XA`atkc+???_@`1-2|F=XgFLJ0O1Nw9t7EAVt+=6&{Saato+4k)0Nyk=$ZMSCi*7
zQ?|7EbUDqiK$w8?1?CLn1$a9i<M?I>y>++Sf01JiiLgAMNO5A0Sc%KQr{2fMmQJKr
zFgM^eZSuhYg%{|5u;0PbS|ckY346BK3c?xT?aqNklSE$_BR2h}U<NFd`*!Y}Muytx
zn3}>*x3FwlR54&&uAowNjTrlo5H*k35<Z%{cGbl5lgiix4GVT=a&rLuP=Qb#izgY&
z!Bh_-u}=^YDg8&b>mkSj04~(JnhN3Shev+ne1x}bs99P`BPjN&Sez`cR4yt=x~?6r
z8a>e%=F$t1qtX9BDYgjOM79Jrg}Y%2!B;A0rLpMwbLQUbnDx<VjB~9dWINUieq_Sf
z?J=|!?4FtBloitp=ohm4r2reXXuRXebG^{kFixwskq=6$<0Lrc3FjMB=zRk0fpe}N
zV5m<wNO=Q~UYudknmDa2mkn*YvG=+z5qv%jJBfQkT>fH<bB-TdZ@Eb*iV_aF2fW-<
z8J~g6h)qO<J(=Ya8W=>LL_5aJJ!3;dU@NSdye#0eUhwUv-ev9Wqy3@wZR-Qcg#DBh
zK7ut3lXvG&o)Sh2?E9BnK00_ZFOoU-=kSO<Vzs*f@U-Xv5+%8q_4YgPnCu@yPpeMa
zB@bKKBG9kWb({fCuV2~AJ^fxATast?6VQjs#|T&Z<!int{PA9SHOOzF4x6RM?g2x!
zmRJ2nQG&DJ(-T@0pY16{-RGZ)*fx4)+@#rczPe3VlQYU+3qvDv*#LE?wifl-+`MrH
z|L_zrmID$okHMN%OFuDItO%MjFWbPPlr(jO86~MMOm%&=(#ZEsmvecC?N(UpeXL|&
zdxpeIAikZxWXwiC)%sWU=(0NCV;Yt-m0EUtE<<0pc2oSm+<u+^UJAF}^0ogJ%>b>$
zE4JNpM%$OEMd3xtq&#j+J}T18b!D~D-#i}iE07|3B`*;V$}9ZG2QCvIr_HNI+K7*y
z(L~M<vcqS!>kOUa6~spZu8O)!8NT77qZcWO))u|!Fc-dorfSfe%mbl3q}Mapzshsb
z-08aOJpJh7=W=6D!lOM`4zTqy$Ln}GEQ^bM2|VhIvD`BzLlBIjs9{&9_ki9KCTjz0
z%H`+G8Q4CGDO}mD^!B$&6b%-cRv+DsL~Ge}i)rJ>|DqAc?+y;~>oNXv+dbYJ-{(#r
zM(AovE-RHqCh|Dhx_vT!^^3}Yp+W2h+eA&Jx2~Jd<@f@fXyn~Q+zNA6U4&j@rk=0G
zJE`7`s3cmmJK#U1$bEr>l75^isa2yetClOw39>w+Rj0?)C0?ZGH#`2<-j(OKUK^Y_
zW#KK+L*^POF&>I~Jio?e4<cUJ*4;jf5ce_X?G_k0#<A+Wi{er`Q(HNehU$lzYcnVA
z$sv?q7%wmE){XP=*1g*0SDMZg!^8Wd^bzkzLF9TSt@j9l=R6ZqiE_yrXI^Be81SQX
z1)El-_t6*Wj-rLjhVFE4`>Z<qC74Q+;V<k(o*-ous1)XA5+&cDXYCajE3m40)26r&
zT&@)8%U2#*<L^<i2r%(@%1)9=H7QZ?uJ(~Qi+|Sows6#=CX$!bBw5R!nl_uNe!1k<
ziFbm}$9cw!%(9-CF%CT<l74;->y#wLU0uHZVwkKP>*B*EF?vHYvA8myqI>yOR=LYg
zRy3;vWklx&wF|!S4Tx6l+#)b+k9eeSNVT|HKSoD>J7K_zo^s)p;$%N@;&JvdVrH)k
zgS;{{OWS;<`&26XcT)^9>~u}+v;XRokIaO})r4CS*^2R8mvxz3_EJwB<jEU%o@?t^
zg-pfi+xe?Rrok<jy`Jfyg|0-ROlP4o;qXLA<Qu;p*icgdQew$xU&FUu_1a|9VG-N#
z|K*DrB-}6`cTV5)KJabMy2+czhp2Irz&=w=E;o4u9<<}Qy({v}_<C~Dh^_JhQL3x{
z6*ZlzlcRyeg1G+&QC}GqW&3@NqJWAZ3P?$Zv~)9q4-6sQjihvU3?U#jbR$DbcZY<;
z5Yj0PLpKaL4CDX)t@p$G0oGzMcg<XL&OZC>vo8ic=g|<CY{Y!1VPZDLo;dY1<hMN4
zE2140;)D|Opg^Nu(9Nimk#Ux|f~!m7*OQZmTL*_?=szsemdrVMhiR8Vwr3)6n&87w
z%?1a_Imip!x9Yzsyho1wb3T-T{WaSfZkj?4#tsDH$woNh9X(WgPnv1C2z;;UTMq+_
zF^jVmUy_;1Yi&NND(+&%z$}lUjR^j`d;jtB?y2hUBW1Ebma`GfcA$w%dB|+@R4a%)
z&#Io*T?0L80wLKS%hIRJ0pGku+lgt;9aaDFGs}9;gM5D_c@>L0B*5(4%M4%BR{`iI
zzLzIe8{gRyiK~mvwHsE<c5q_sWo4ZEsA4u0uFy8jtxX9ZU(nL}5}uc3#QvA1Xqko$
zy(3vJ*Raz7Cn8MHZIFn&^y+Zhb?Y3L6FZGwfc$v#)pc#4Ypk>?X9>%P;KZ$&?C`Q&
zP)#Cs*{$GYu{+*-UHgh%-Ni+nH(Vv%_#_$)`g3XNC^{SM^@2BPfo9v#5>tEK4x;<S
zCAShUf(?YpsKJpfba~uO7ixC9M_>M0f&=t&wp=l@A)B%@yZP?K{&s5La(bA+&AAx#
zaSdTOAFgH6FRI^G&Td#j7esq?tn)01if#Ad6w^pV%A1?~GzONDd_wC-z;_pQS`c;Z
zyxqL1rA!(FJ7)r_0}xVF0@Cm4OIm^QnVd2!iQ8e7x$l(_%p3+-t7v-yoNrw>caYnt
zBA1l(Xj8^$?49@{`<r#^yBUKj$ZoR9%_Y!d6o43*jNB$*%Bt_~xzVHms$@{akn&43
z@chGq0G*ES#gN}as*o)_&>17-oFuIcf$@<6{zVA9f3k;hciV3t&>*xDjX~T+bq8ox
z3m8KKpjOD*KP*OD$nTNMNigOq<eIo()6R1;0`nAxp|5K;gp9xMI^Fn(wfS|$4S*D1
zfRYm6+9L}NT`*cV5va1R-J`BEgMV0Cu}-&oQ`eKgeLNlqE~o^0V|~CMMst0;hE}pX
zY5j+l!vk5H{)g2!*|z)t?+ZMKK$o8qUTHzb|3cQA|6vVLz|QKcKzpxYx1_+4G{7HV
z^ltzqJzfGXTk&figdFC+M?r2gD@$yUMk(GTitp*_)?JpW5ju)Q&qiei=8+;UXfX~!
z3N3_Mr@`0FxsyfPv5WJjTWO7dSjr~%@2i<Xe&^MiGg;Ix(*f|4PLo^BvGC{bbGdOf
z6|P&R18?A~ix>eQM|cqLLUDT5RBW)p>6=Rp<H+7GRV~L+U&)0_4c{&W15;)t3`yCS
z{8(|?@4yQhSk7*0v8{|j;9q)4c7M0u*zV?v^wYN|w{h~j3s|v>>~=@Kcwkiu`^~^7
zt0mRL9G$0^f-6$+OHTFu{W>hK<c_$Hlh}9K_3NV<>f{W`h75izK*H50>|QlT6iYw<
zF!^8?^^Uzc#$2uum^6W;I9t*5g1dTQrxRBur3JVs;9*usJPF%xXYr8N9?uud(s3s?
zYx3s=SS?(Ls~g;cauF9K{mL<AoG)F}6&jKb)^^60#MR#z+GnojN8z<I#w&jyz}+^I
zi5{MNC4OS8{qc_rv;(Mbqgs44bnJU8qZy~YnbS|h1Rg8&T~#Eq_2=goSoZU`I~>^d
zC&{>Za1g%cfeL3QM%>n~NzST;S}5B-KdSC7VAq$W<gn-^<!L|3$!Qn}pAnpV;@9i|
zPg^60Z)**Ln~kE#N$6`zRsKq!5P_S&+TR&Pc1uXuc>aDyt^W80_V)nM>tk>|O_R|#
zLS+J0+*G;8bcvegL+=a{mZp-$Eu~(SKCzYrt19C<>BB6jXFaKzO!Of+L9V~^ts<qE
zpRT`FNfe`^UhU9y?U17sEWJ1x`$E$@e>Zh4HP%j^NwxocIl{U98mcnaR&v7Gp}6t4
zawDGqCoQ!26iiF?T0)56rE|Q|C#yJ)GsdA-9XmopWWr!Yw2^`zCBn^yua}mwRAa)r
zxCJ$r>^xTJlT<IkMcX{Ty{0lm5KrRzd?Q>6w^7E4F6x>gM+hr0BJNP#*rf4lYhF2$
za@dO(e&a>m@~+MmLQ!~(bTQ7>u>EskEl>bfSmY{a?ep~*Nm>AF6`swtwVD^K218X6
zy-SK_ovxln8Dw?w%E){?rr7;D_6U*l%lBNpC!plx;JNWrOOYqcc%a~tIp)~myyjfJ
z1Jb#veu3C2%N})e@hwx}Gac!eY6Q>Defhca9Xa9yzW-Ya>!VoU9lB0u@-e&6?eG>0
zXkEX*DtW6jJAGm-Omj2}kVg8$9~2w|;I^@@jdw{0sp|4&hWg#@g(8cOsL-vWf)|Si
z&zN`z)alo&oQ0@7u%|;&zB7vw+beVRFQ-2bY&ToUda{HuR2!$%Z^~COO_?FBxWqS7
zMyTEu&&<Cm$t_a4gDDg<L4R|d#cjd&HdiTy7X{bYWrcDMk3%s^HbuiBLN2<bqD2$U
zQG8y^Gpc1pkyz%%`ZLk0$q89;yBGly2GWxg?}ah}CVht+NEXN8yj&+`=R$XyRz%!&
zI^^SQpT2+PsCy?%o7WU4osCxpvyJGr7FJ+|U7GAtk`Zx!3V#BWf>ilov|P_^X1ei(
zPZjC141OKg4#!|v&>oJ<?W~w~4~b%*e$n=il@7t?v{&5m<f*KBkC@_Eo_G?gM+j(!
z5^-BoTX>)hO|13ghe@<(MAxAYphtIe#7U{=><Y>%)o5%I<{w@>XAkX4jEN)fdBqb&
ztQ>n8krP_PW{Wiv08c=%Q%V!iB}Sd9q_o%o(^b4=J>dF}BJZ6Zxva!8;h|v`QBM9Z
zN+~7O%D6Bc)#dp4^Q^w9p|=qNyySA)uhw(-miTVb18vSNFzfqv$A4JsRM|3$us3f5
z@&KC!o|`a}x<msbg=ZV_g$eP)2R`Z&hafcDO7k#G!?@yF{5elYqILeB4#aUnptT6!
z(Sqw@s6B#-uNzJhR!l!sy2wysX%qiSR*Ew0tG7)3#){p;!2f#8(6oP8bbj2Z6Bz1M
zHBMWjJk!J1fO*2MYePj$CyrO0&7!|c>iyGN_yZ3vN=s}-aN=_wC#^{mi@o{GTnFOj
z<z>aFhGye5m+HzhJa6v>pn*XxA>-B~codfaW&VW`y6Hl)LoAkvHLks7!8cEJl0FBU
zCqLwHlv#hH&QEoEYg3aS$E7{Ii|q)Wu@2ROM>%kjYAWi5Uv)F3RY_q|>WXyyNNa_o
zmwr5)qDP&M-f=)iGJ)xAB@91&(!GIP@Vf~1DfK_BSX-zjjVc=@6sSsr33qi%CAT<~
zoOU680Juq;x3|xoF36lCk)8@$Cd-bYf!Nktf3e>F+x!>7w6VMt?&{ouyyDLRj*b7%
z<O--e`n94z__q`KLH1I-tQ)l(O^({scf59CI)8mKIysv{mZW&4@LfPKvgIgZM9yZb
z#IseCSNpZYf6jjmvolbIlkb6jk2-7;HhTYI86hx@eYgIIzBZGG0H6t;_x!4cNIBHO
z!+Ko1U#APsjehnIE5i={c`lIkt3zZscYWUy!<`NzRrd4Urz>L+#HKN9O<)rJiRN4z
zhPsNrqu7ox`Q47dbFF;49FxNa^pYa~-#HwdSa2WQVbK)M?XbADp?_Eb6lIcEV#I#J
zo+i$iMiWe+B9N6KJIXgY*iXh;)(0v-1<=Rq{f%g?xZbIhdLtS`z-FmTI?JH1o2??$
zE)sBx9*Oj8wRoD%Gqws~iG#8=`*h)lLqDXpY5g)Gxv&7X{+DnzVw-DGBYUH?PRtF)
zV&C<Y!#j<=<*}&d4f5_WUyxf)qvogWaoqdfyXnSIOflx^1AC5I+#H<~-+Y07MO}<y
zmP0Xh<@Kps^K2INi!979Rmh&ej#LfaGPucZ>Xb1WL!^V8=}>%5s_N46SQvfyXWNbO
zWXrVv^~d8Rl}BfM-GBJ|6oKUbeT%x7pU{6MM~{IP09ZlOo*#DY0=4$2=&=9*G^_9y
zXzXGIjwtu_nKIj8EeB77ez0V^Kwb7TU82lqL2B|_iVx#xZ%8Z_T*iZgg{PhFxM6B2
zz>Pk|XUo)&^rz2j-}k`k;=Y~M7gS4XJ~M@+NIX8WF+Y?~v~dRBp1A@$RXSFYVh8?q
zB`n^kz-6^K$XqafKNqXF1Rh8s;Y$MMNL^_1Sw2GHp6I%2mu#xCYsF;SFlxVf3cGI1
z4SvI8lzpDwa7)xyq6%R!4wRJdVI0rhSZIt5o7Y06nvsWtZe-l_&FY5*uaPPkQM;08
zK#K-CBHT9IiRpl;C%(DU{9@9Ek=fKbY~1A@je+7o#lpw5>@A*zu8~y@aB%{(MU@>o
z%r#3=ae@Fcv4-4iOWs`N2GkydnwwXOs6F%H0--OA_6z}R9blGfTYorF^HkYy^gO?I
zc<EQ8gtPD_j;tFZ$UG0g{USjpP<E>xfCdW1WW;T;_UrEUaXYu{-dMx#tYBlA$M+{)
z7jq^LI1wOLK;+MZnsHnMz)A2Q$oXP}OG3=uN654==#tGXhw9#t!uDZ8q7r^6x61>1
za07~c#LO@Y%_023Gwu>v5dEMDIgthl@n1j!A%`sw^d*}h4iiPl&etal30p~+UdZ0E
zhSdPmI%LG@0q?o2$Tz<MkX~t$&=AX%$pZzeZi5?Z+<Ob`jHGIWuGe9`dMg(GGs+@6
z{;Vg)NbakOp+`9v@}SP`uIEd*g-#ESe&QdiV>6K^C<V5lue*|0KAiQV0+kOk9&QEj
zYpy<3hYO6|$k&gI4oRM1$=^9#n~~U*gYUZ9%Fux+8G%2=;8IIi7%iR5Ne%Vc`i%MH
zcWIx#9^yTF^bAGaRU|24Oln#I%|Nh_k%Z@l!T{SOPueiyQG(>>P`3JiSm9z$FR4rx
zlQ8oa0!3AJf@y4v7J=4_v{`Q3&K=fy;}S9!xBmaI3YTCvT?u7xDZ4f}W+HK)F5#Y&
zY_{3Dm8>a2-W)D9i9N-TVbr+|z+%PeF<FWvRN3dZnxpP##`=rR+|E5(!Rr=?Ot>g&
zG0BlL{rlf=ui&JX?8~xd3WQRCgKK=)hgZYo^@#*N{hkBgw%exGen!Mb!e!qis?VJw
zty)|BniI|NMjXg@!w@Kn1amXoVV|m`-fK_SJwf$0<~#aT_;M|CBRZP+(?xE-tIF#F
z7UCv8D3gJfr^^{f1`R8^Jm;LF|Eg36<1-L>#|5eMZLqU!rls5IIPGim=e-TTLfCjU
zF{-|03;YPasQloOT-G%IN%SI>UOYkeNHk!fxjFcYGkvU1aV~jTt^Q|$*`~$+TK54}
z#rQ?#boJ@t;3JI?5H+OI;ssFQd`mkvr|g6snYA*p{+c|tbSx?>4~Ps5H@YtO$z3&6
zw53OWj(y`3+43mS*TU;@#fr&zDO#+fqxJ8uqDS~bJD4EI<yIRM`L4*vg5lJV)X9Dq
zHM=(^NeV<%6YGCivYZ}M-vW}E9DLGtoUO-944Q9%Y*Lv09prUR;`->KQvLW+q<kWv
zA2nYp5l2XP{;oO{D8Y>8LCa1Lk4tS#F5Uw3W>vuL=RNOQ2Qm=x{|Ycepo02XMxlca
z=jb}z1U=yCgnh?wbX7$xo!#8$Fre;VOE9}&plb^7xrWv&#f5^9jW4W^?3;Y<x{nR`
zPVRXYc^9ZC!j>E>x~}#hm!?(T^8G6`q(2(L{Hw$*SXL*b_O3>T$Rl21)Qrhl%<ECc
z=2uos%g+r=uig#ajCv%>Jr1#rnI>fO_qZ5Gl2o{mc(y9)cp76r$C2-*nh~M_NayuW
zce9DYBv1Cna=BHzVa_kz@))wuY0$x&!1MgWnk+kxe;9-P!!ll*WGjv||Jv|X3o4iG
zcG#<RNQ8+40`q+Hr9x*!vjIj%vkMLyB3~6$1Kd?cXi!cxS5<GNyU$6}A^RuXz{C3>
zpyD6auFl_HRpEX@^ikL#Fp|JkCc^ymAZIk?l8#N=Yu_Okf`mYp{D);#r3+Cw2&4m=
zQ!`yxhcuW`!!{`7`Dxtmm{ml?YAMfL_18ttfkxj6gF5EE)eHF_WxQBa1cf<IJ&3%m
z75{?@MQ!>#5`&E`&|u%^I`iEk=kh&>3EId+XJ^+NgcI47;W#|{H;XT01oyW&&Id<(
zGrU;QJXfcBBX(QC;Y`8kgLk?^?^JuV^TdV%nPa2kgs97UF9oE|$ad!{bz6nR<a-u?
zQPqmbs>t-PZ7K8t1L@W%i#pG8Jx3ICv+*_@SvVX1ALxvGUyCT(s9X0R78%CFOZ|`#
z@xabnZpMn@o63{)FDCwn^^0cWprdGp-?QFokuioVfIxWv<f%D#NaN$&mkJAnAFDAW
zwySrQ-IPL)PNO<9@1y{j_7{l6|FEvgqsTHev_tv(Ey#u&Ed^A{58Au<$k9ZBILZZ%
zkacdYt+Rj&np<>m*E(~X;5h*p4Rn~dR4|Rws4;-g@PeZOvZ7MEc2=~jd)H7X`cUQE
zWXDZq!>^B=?>A&!KwF7mpGj_{KkZ3qoO0ugWWJHyMJ?<aAaT^?Uo3;Ucxpbtt6VU^
z+8QrM<J{Vj05VO$nF3=i>2B9A{O7+1)~>&4gRbq5-r)&jQab;N^bFYYLVZE|+O;EH
z>ny#x)pysJD~#2n=~K7Fw-RFqHnkl<`>ylQCl^S`oZtr}u)=;)%HuJNVgFCuJYR;o
z^uyps^y-cS2i^k6Yn;-j&xUSb-bzah=>K8NvK^SomJi2UD_x3&<z@`=>CE_Os`%Cx
zllNmj;vjP2%l~VgcQ|k*qRC^top=rv&*SdW^B0mvFw#F285uZ^?Br}1X>Ng2!$GLa
zjiPJ_9LSNK5Y%N3FAPcC;`RYJ#AL@xpo8IgX*PZ>OYW#mWo(;B+=!uQ3cjqI<MPxy
z#_Y}Ym>8jv`Kwa>0og;>g-ak3%$KCHWi6UyR0u<8C|BbUG)N|>yOQ|llCpcBy$@iM
zxdw+D4m>7P6Q|M8)jCl9(ug}>D&f%J>ET>Xs^j;!<Msq%08%z*YskFq;wux8G^_MA
ziGloCk(FB3^r|cGBc_U<O}N%rRSyJW4@IJ|f76)TfT%VJlT!cRAAbXNVv3`R)r-U!
zuV#HR%ROF82mkO^%dyJ{JE`FM!5}shLIRgAo<>a<wl#6|DQD+&XBKl#Bz{iAe|$_H
z<ATyDi1@i4mpPyl*XSC3a2;r0Pu}t?`M)AF9Wi6s`!cazwAj3v)A0u)md}rZ6T#SL
zRn&9$M}HtbTmvGO#*g3v_J@N1uzHD$FMylV+G}leh_RW6R;g%|WNLlQE`Q9kuv6Bz
zAU-eZ;SJ<a9Nd+lLW-s0l{jP)ut4=H_!9+n+Z0Qu##v6{rkdRMd5Ix4pdx3;QeD?e
zVn4t2?0L&V1q!8KzvK4xW{Tc(%81sKza5$QYd(*8q}sam4{J#SrTGmoJX33i6wJNf
z{rCfXKk@t%CJ$H+F>B=-)2D<t1sF~98x~A!3wp6^U7`!I(REl4C@7)aca~Spn4dqt
z+z)^*C&w`&QF8Oi*F1qilg@iuYUvu7)?y5FlxHT{v7;PQkFBkS6$@GczNB#zZ&`0?
z9>`^eTl<Wd_u`YnUgN{M=cLlv<11Sp^<a}%mTn>|(e;zrgcEZDf1Z&{F;n5FE{uHu
zvF-Q#otC>6jyj|iY_A?7gdUY1xTr_3U55v}<P4AGM1MYXE)Dtkln>B<28@SuZF+}{
z<A;M6e(7Ytxii%s(`>C+EsiPHlf9nc%%lvOk|#ovM{@L5DTDVl8nlPzgaJDvEt1-<
zhE%y7Y4eZb1G39+8EALsDtZb=O9m2OOqsKgGoYTUyXYgodQ4TH7$lx<skPaex{|!z
z*F<-d`B+9srcCO4yRO}<zh^=+#YB2#A8&tTpFXhlX<#R8SO(W7zBdY@8~Z5dRm;-#
zXGpqQNzB$&ON;VX;=5d$Rp9H;-BqcTMRS&DlCZLN7X_1c=&~}O&&D)*{=@p*9K5Eg
z!Z-Q@W8bNhqe2r@EifcA(`8WIhnmS6FkQPbzmw}Bf=PbLj@{js5S=azfBlGumLZ3&
zZ!zGA^xR1Hzj7D8e0WU=9y!$wVzErTnVTN`M5^!$^w)y~dnZMam3L#=w0f4E70Rc+
z^bCFdvXC~_HI@q6E~|`l6gM$fy~MBDb@;b4q+@||bgN<;>tbwvabYV^a-f;RJ=Q?A
z&Y<LJOUy9+qwPg6(xv@rMjZ)z4yQlccG)hKlt!y?6YAnZc#2lS_*APCkw8^J3E0Jk
zen1efPW~?a&Hh~=@zM`M?dGOz71BAV#rKhy-E3h&v?K${URZd-fQiVFKaUM0x5tYS
zYMFW<JoejF${9~&&sn~-SosHYnVXLnWw}bq5cBa0blT%y*P-&rvWC!&_>r-?lx$D8
zW9ycABmcp3fqLHr*|+!Uci@qzG>x+{Sh3+iYUu~CDW&@#d9+RHqDS-f=0b|wBXypR
z)NQ^V38b#pHP=+^6(-}*@{M6*RcBw@<@6_P;}$*{#_|S^sS`S1bEX#b$MRl`yqn(&
z%>xBHpRTqdzYbHj+#^QQq|ofSP%8fqK=xNLxz}zhtzMDh-u++w4kQEs@3y%7!<tf7
zb5<r)zH*)YKAe*~D;ax5UaktMltc*Zjhpx6AM<;yz9u`{X@V>OsH(o9`VJq+n)7`S
z)T>I+b8fVT=;S{v%dSf!D;v-SSuN01`2G*;lFs-kWOD?y7qALAaYSM|dnLpJW>L@c
zK<MySkm)SQUVr7&`{+xOxKhIam~P}R2QtW(OqF+e{buS`-7Zdj(GzmqG9sN+3vaC(
z!gOo$ijn*DbsbLnP-t;fNwFVAuirarVH$<l2>rBM@Pr25$~3H4AR|g-jTAD-25>K+
zOVV;*AZE!<1EFLA!dTYH_0+v3m`u`n{p!9_y8#YvsAuaw4#I}QUtma#2pyZzQ8o}L
zB$3Y_=#10%+_Xfa=3#rS6qu}W^e>=)koWu)?F~jV&|D+mVn&Sv3TSR~_Sdei-eSJ7
zFOB?9`lN?!^y2TN1EA3q41h*R+wJM<4I^+L_Z@`>=9f9f!LTiCoVh*ktU#!BFNe&|
z5AvLWp>Jx$jp1{y|ABOgN-r+M17cqXKUkXiglr=u#`>K~AP}#7H~0gLmFSNYN|OGQ
zLo$fJ#w{SwM9w2e9c;XA^nam;vm6Sd#%#z_7S^9)3Zn*!`5lk*PS%mn`AbUV#WLR_
z-vvLh_e_e2|KhUe3upkpwExJ9fBYiw<IKcu=3j^PC?gKJ*8<=#g5>9G!l*zkX(j!Y
zELJ<VDiNHI@Vn<@AAVd#dbnKLa!0Mr=PJ~3&Q0Knsj&H;+3_<t?s-?r2~f&p7=N`|
zf2B~uxb@0!R>YAmaVrJC)~{Ni!6!9Uybb#L$4q!%*Zv0@)Yq=D1_7d^w}=O}WZwU*
zzH^&=Fqh0I&&*S$AHqV$O|>4F9KO1=ZvWPNEy=usK#^{Q^V9ICUC~!Ob4aR~jJthR
z0iUvzV%Fo0s{DO51T5=%4fAQE)5L{r$0els&vli{a`(-vR)n7o_$X7|SU<S$9P<*$
znOltKw4mV170MpP65Z&m(w*--caI+-xkQntnKLF>HU+0Do@-|UG*bmPE`GJj9j8_a
zza@TCXMn;qlQ|-U1@`UMjp%$UdjqUMQ$I~`zLo=H5LsQT32|3tTDeW1e=Ci+4f_Y5
z1UBSI6XfbtRfq9w?HB(JEC9_FnGo8@-?-m@`G;j62CBFt8f?!I+m;op-c_KN_U=Ks
z8IQGTB||)?o}y%NFH4_Ru-}+VT^g#RrjI25^KM%lTyrHS1m>Djc=yw)Z|tj7rU1p?
z^J@?**cRVzkIk6PDB@_-=lPy*!NZCq;`}p2G{aw@LI~Z`0+^U40}4df=zWwrF?Dhd
zb3ptZy^rs$qK;#YY?mJRvUI*DGw<N~5SSV=e_)@yj*@1hX3hm(GYStq<veC-Nj!L0
zD?3H<d^w2kfjy1;h<(1Xil6g8pj}kag?d&^4QF`VRx_PE?aEu0=u5cTwWZeKHyu{l
zLuD{5MP<fo16OGWT^u2(J0PNK$~(T-49K?=(}{9OBscNt(S7jEHw8De1}&!1N<vh&
zo2VN=fj+<wB0tIHM1AMzYY$h(cKMTt=?5JVv@Nuj@ijjVR_;^#V?!?z`u&T)bT%oS
zZWo)Y(zw%6cFd6Nl%5+>;GrinYh%>Yc)dhfw1%lF87+1*pX#@3s%OiuiAB*&%0Jk$
zKuJ{1Gk(-gkH8Nz_mp=zzZpmD8|{QKI}QSo401_lNHK4-aXorYa{_4jwML?h_vgcX
z<@Q6*2x~F!VO^EdlsD!N1E9*Mo%VjgD4p!s3jOa}aW`pqno)c-XsrJoWK237dYV}W
z^P*mO_-daTQFAwbKaMU{HH|BSu!&-<uZjskD%%QM32d88C>BhZboC&C802UM(WeMh
z+ZgwCNi7uNuFNH+@Hg^B(0)E_Vou_Yv!Vs`yR2ZpsVVn<1apg#tvv<m2C)w)$R2&0
zGvv|nIYG-Ex4<1k;n_0Q0x4Y20Hw8WPFQXLUVOim{Gj{_xWM>JZnPrkxB#M1Z!f;0
zqncI{19t>HU54n8jHqEQR8U7gm5YOg4zpM6CCjg?sg&kvR%s?QDmJBDJSg6iz2EYy
z3sKq4tffDmU3w`01Ss+WT)SDe#q+YNM^-mG+oOjzFnL?-)5!yvTyJUqlD579JX57x
zr6=-fzx^vHufoS9ptD>Qc`^KUc}TDaIKJQD`iQ<O^V6g`{#ua613PS5wKMdMklvOx
z=%gQ;2Y~$Pv(ta&?*g6BF}5SCcBsGPnFeNfx_&?!GomhmozpiYiPr;c7~rOZa_RrZ
zvV@w~D0}5EQ2YRhC=owvsu}|;P=j696>o3N?|A!LH1DZcXv7%HK*wt=SNi>aG7(zM
zk-G7d8Lr?J@t6QUgCLeg{@O&w``5x`2Ol*4VOfJx)Y=lC%pC8g>LOw}i$>~$_<n^T
zui)E8#0F|@NB+Nj8ujz{06Z(`vMA3yXFikI6^GNl@|4P(Sz8KOnkqxRx72m5zj67D
zE>c@D<Udw=ass|E>>T!DYOYDz7%99CZ=Ye=thYHGuSMt5i$oMtd7Lyc2nmVgHvsaA
z5V|Ukwf7j3en!B#`;7>eT^7c$Myr)_&q$hTM+TL+AsSqueG_J$bo<Cq!)}D?ec-U+
z_sT-E)1Xy-r%MbMZqrMHEW_3~#pcxaxr>>4E!vht$R{DRqL-BAH08?6osgm!k(6d3
zf%|*m9KnQf#)L-lHjllRZV26`2tA6#!fGrr>9EDV<E072FE=mh_j4?RLSf)S7ht?X
zfhkq*k~9%)ozPZKtulJ}si#GjEx@P~xj1awUv?nDI&#V79shbdKJR0Th`O&U_^Q;>
z-d{M<j}rD!Vq4TF>G{p5PX9e35VEQYk+%JF2#eq`r&39Rq4s3#<n^<4Z2n=TTl;QS
z15IlLK>ie@8H3xd%T-^!Bcmq7T`UXj7P)Ju(QmXGZdMaJ%bTga#`(o+?|<~JFk>%}
zB#~#s$TaPv@)>V0Y-LX3+oXZ5;PjtO;rx_$D{X8c&zQ3|2hHXEg@-6`zP{jNi)lW^
zc_wjNrqgwfaDjGeK-kU>rj->;MsMxRPi}qv7d|Uq0eFegS3Hns(mGwr&V}hu3zf_Z
z^D5CMTn)XOw}%zB%9EcM6&)zM2c%m$sNMuU+iX#JA(wDyS!CL(g$=!*`}u3;)ImUO
zyMc?Sis`PdTWCz#1>R_XX3OUi;aEH%Z9Dw=uqed4PL)cVpv2f}iKB#XK*$26jh~kJ
zV|>*Wa(;SVEcJ4^<h<eYeM)i?4M<QTfM<VsuBNxejyYD#qfRz<Jg)JsrdH7oYXIco
z`0VnaZ<_n+m%{1Cu)<j`YR`C|V5>!!@S_$)k!q-HOWKJ+Ga4U(((W^+ZuR4rmHE^z
ztcDRN?5uvF$K>>hOxp4013%D$a`5y|RJ~AEQCqDZDX~*E6lrL0kD?1IOU%Z{5(ex(
zr(j?|yuMQg9u})-h<TLub(U4+6Qu_KJ+kxpsGS^U>e;dRN$PEV&tX<(3=rfQUEVGZ
z)caUp+ugSoML0Xu(cbqK3y-_0?yX(Co9;S1;C{f@F+z~N=6t669`_he7$x8w$pEHr
z<6|hruPA;hw<lHo>4kuL%4TINvN5I5Gl%zVuCm<D&dc1sUYb^J2$`TPWq&4jMe;{}
zLn)9Wb+T%_jTZ3L%B-b{@>u9RsdNVy#Q(!Wy%MnHraY{LWo|wL9TowE;b-k3C3f<5
zJ{%=p4B-_jVA=bya|M1`S;-^&V}sLN6_Ri=tox}wCawdR`&yh1-DIw4!I_%+H1QPq
zDru>JVtEd2k!f6fP5DFIaD-TndFlOcIaxi~ZIK=cJ5i|eWD_tRJMT6ki+87s!4<Hb
zA~olzcH%huL~bKJR8u03GbZ+0Ovl;ZXd?@Ddw+(JQ2Dz7IpYs#?|y4?$bU|B)0p4_
zVD~rrt&+dNFw2mbnj7Jw5rS_(Q%1h<mf$ks=|*gAHfLXS@TI8@yc<IWq{O%YTN##I
z<R)b2Db*gw7|!h4u``tC7v#rRJA80bFznX@dns&VQ(yWf-ZqxL`=?Q*qoipEg05v>
zZ}Qgl)I?h_$zZ_r{<m4LSet230DEF#QeV#<-`*MMf~g-81<KpQ{W!Jcd1!PWgzFyT
z7GvIZ&u~%999;w#{^5QP;WsDHV1eRl?CU?Fkp5ZME-u#G{@kY6EWXMH&jI(W2>y#1
zG+6inzOB^FkGP%wN)|yp9pwU;@juG;Rj1PU{xd<&{MmJ3zxeb^x>)`x)`3hL{Ly*H
z7Iq0hL~+$Y_Lw`WhBqp<ye@iAlnQp)p;F^C3Re=|h+b6MgP;8W-tUA|5fR4cN59Ma
z3n-vdpQjy+rCUfR0t`58LB|I1QfiM8Q*?U$@X|sKiNoT*@LH<V)<O?I;Pl)wkYyPF
z77EQ@oH=n(E{)QcI_(NS*o=r62dNFF4vMU4e|;&6QcHCkzu93fH`Z$`$W1dRCBO55
z@32|SRcT(-ez;r&UzIqWh9L$v$Q7#a#BYu@XlzES2BxpYa}K?b>`vIZh097pnYbj4
zP`e~fPwID|eL(Joh4Fy1Zp~pr%M6wj857(7lbsG(^CKT3N3&-Uk**)PPb4bC4GnG>
zFfGlPFvX;uOVA;)`*j65+T@9~y_!5TUkIMsxPB!~v^ZSSp;hMSQ|g0o6;njX?2)^l
zobkNq9`>C%uq+RcY`08zYj)>aw$BaOyJIo!5Y$f9(rv`p_n}=GJR0Q8X1=B^r#>qa
zXU5DUxY|_qM{E&LimSus`{3RKo(Be5+iI6hN>@CCb^x0QnCz+p)`(9w+lJm{)nSE-
z4DDz7$-jQ|Yc}v_$D)zCNHD&WB}=;@5xT5=Io*K@{7jYR&r8<j;ebezvfo1|f3_{_
zMPD!?nO~d;KY%LD0AbePh^qu80L@5T53Uton25f8Mwg)Z`cd~*(s)fp$&OvDmAoE$
z!2ic;2jLf;uu#prHa(1WBTQa=*aaDfN%YDgk7>%7K#YFLcHfsTS~1{)U1wC$#Vn69
z^{ml{hMVC+PJ#`YW+%gaR8vh=9mv<IJo^9Rx-Kh|xseskfuyj6y0&-jCW)5H-@Th1
zD6m2m;!vh<IkD+tKIh0Qbyd=P7k>_W0_pO?e!}5RemcbP%LU~y{Zd&dK{`x_0lUR$
zwd-hBKrxb%f%aY-rWwJeZe={%`W4!Yc}?6dS`)IWMLz&mKN5Rc9E`aJw!C>KRp~xR
z|I$8$Qr_IKNs$~^WcSomn=6et#NcfG$K0Ddc-t;Yqd!_8+$)>bOZ*BEH&r@SOk!Ds
z7#9)CH~7^2Ic@=>pUSE1S1MD!{Q3NMy6ytHJca<rmeSb00T8_PYcn)Mc^M5)T#^hE
zn>iJ4tQVv?H_w#ZV<61ng5|;0_s!bYw0~-2nw*6YG|IN@iHGYe^e<b?8A}0ls?CYs
zB5}uaaAasT3;J(zSj8pc32Yd~6OQ<lvPp*sL0QJ8O%|A(RSl}FC~?7uH}?$Qyl-xY
zWXG@zy(nIm3FnntFf^y}I#gqd=@0kLU-`J}`N40lKbv<DoAn8Gx_}0ZX_17@HebOA
z5A)~PF`@ls=f{|wJ8htZ%1zL(k*4W=-7PPv-q<1$sGZ6R;BB~)00$RlCSUqxJI?cO
zkXK7dcX3k<oX6KbbM2CPPh1h86%J5I1(Tj+r==fWKl8jv_cj-YIxW`HZKbYa0P5>E
z{M{PRu~mbBm<v^&EO<tP;Wtr20yHH>zd6Pv<7AtkxJ)}n`z-MuXzN6G&-JLBe(|me
zP<YpMd%bAcjg+KbG{60c3ka@#1IY{x6w-0X6P14jZw2a+u-}1uYxc{?I$TMIXt-AX
zfDs{Vtg|uZlwFOGKl1{^=UTNw#Y&AsBoO|zj6om!^Z{kxb8QDRhxPO|@~tpt;Nz;7
z^$k{sTbY~wmks^q5_O!)m1U44?hSbd+34y;k1BbJFW!3nu}+wp#;W=tX+JBkI`k%&
zoTJcvd|&^CAEo}Ch7VMmzO==$=W0k?Q~N!nfm0}N@uCVx@@xeP(!VIvY&SO9bvtmi
zCoyCu?XY7Pf;=!q7k<4-FsdkI`zh;M=!NprkuUx_5s<%eVkYZpHY`VFF*gNvY@q@l
z^*a4eM1Ildnn=Lg^+V?Ke><nOXNxj=<cT)d-Tt*K4@fQQMJ7|8%kF-=VMU8au(`5y
zB<+ye9^dGH76XO42F}F(Ez~n)LGKO*)fzPPyf^De#@za%aA4%)=ti2s+4d*@us$`4
zLrP7y*l36!^8-cNVGU5`=~wB#zkgbd`=9`!)0w!o>$;UMz=&Yv`p$nNdMD50E|W1z
zhJgu1G9{6jc1d_KK_9yOBp<rij?iAg8+*6d53B%5RK>2`VPGeZpo>fIWyD{9gkia?
zHo+aea`N)ad750t#2L-Ehq9+jwj;u&+NkYDvkUqO=RYl&vHz*IAfk=_Uc$v_+W$;=
z{0UU9YzyFhkH1;Z>D+%W+3;g@r+(E8-R--M{ebhrVP1Xr=fX@ga!Jq2%hkkK#fz0$
ze#?dortW4$6LNXSWiK!kFKGOvUcG?PCpHa^ehSft&o{JO2B;k~WPiq5iRWo%+;Cfq
zanqnk@W}RE7+XF5gP#11W|iu}C7F<~em-ImCdc55?pHAx$ebT1E@E0f@DU3dRYxw$
zEHI2TV}*+hT~{iO;~xlif6z&ql-};5%iOVLy?;>d7c1A;(}ZrmZBvgrPc{9}0ynXp
z_S>(>@Xs2_vhqhh33H1x^f*M#F}~o?k*B0nXC&XOI{1c-CvgEv`Jx7CkmCAt<E-w)
z($~t}NHcMrB%i4vXG{3wvCYrqX(jd`t0ew6@|jsN&o+}n<&^07t3C)9GOE8(VvY_1
zeuelHGN%mS@=q%$@t@0?QE4c#itLXvu&Sk!Q5TxU|LJ;V>g}Ztx;$pNsAh0A5GVZo
zOQTQ6b0pW6C!mawr*cJ0e0Ev>9oDFin!0=yn!ds458DY?5z4x%cUg=#>ajfB=iW+&
zO@-R61dSBfLN`<3W}+gcydg=x*ZN%S&tPfqUNS#Bve~E;ew@afl|-OMOpQq=q5s%-
z{A4E&UqwSjyPuu3LToTSo4Du3l=VM+TwdDiA<$G-%M>X;$w{gjtwtIbd|G%^Qg861
zCRPMPa14S5p*fRE>!wqMh;`ufuMKzN^CBA`@3}$5{m$E)9c5s#Hp_6zK7N86u+>7U
ztY8Id@zN*q@~EfFQ<(<X8V<)YU$LdYZnNS_`Ry&AhQ_aUKd+<9@O3XI>elD!6yL7B
z)ofj8ltwh_E^0g`Y)@3VLn~SL{S^ECDj_B&)SzCPnawTa7hBx5d#6u4ujzgEJT*lJ
zgOcoLl>H%lzs|sLBb!69gHAq@@@1Xkuk6>p7TL4kn&WJ<@nLG}y)Om|U;oUhebpk2
z-b<yY`QzHl(qDvTxrJmH48P!#CLkK8MmS+c)kDf*$x#~k!5E_!23(fhL2(z%sMtu-
zJI#qh4I956s$z=5wK_n>o`RWjh#C7%HHgnh16D7@G%|IBE3wJ4H{Uq&90q|e3kAoN
zU4OY@yi1>`zmWZQG^C#UVAD2pY!_)sw(7-gaSfT<5-g_B4+M=>7Hq2pkmfc-*?Idn
zFLYDmfgWsY_?7{q3^q;U3;mV){j(Nw1x>cGbLSWhWr^xJ1h?~q{d$CI@i{%rj)E(q
zq|n^9a`*#{CXh?&W?Yr(X;?)h$^_b1MEl;HK6cAVuiRjZB2`1Hym<4id#paIPjBtc
z#0z7kFNa5q9Ci1Vqp^<2L1R2JowDDg6M$BMSfthRgE88NNm-OgOWT97lcsMBO86Ld
z4*<C10lNx#Ks|OhCko&O^i%CxHfJlMYj89Z>EId{H5l3Y*DEHDOyuS<=0&=g5oR5!
z4|EI!$`Z<0hlb5IhtxYMA9P78uj0@tH{_*qjIG;ZS0s$=*fOb9h}&d}v9ufYWZG=6
zQc`}__CILB+0H=CR>+&L8)q(oC-iIoqxE=RFE6jiJQfrmdK{YVAzc1e^;<0tBcsnZ
zcKkQZEaafi8k|NBr}%}tgP)is&f!QB;=1B%e+3$3u`;1zE_`!V;dm-*Tb^_AuiZ=q
z<BOhU$wqN2zSz;b8CEe?GTOLGeW<26Qnu@6)Zu*<oQi!0;{}KlMI8vn=Dz9`hLDRO
z*a(p&0=fkeeQoY8^!bF`&vUUr-nQ3a+Yia{=|e}hskL#p@|Wm0aVtP&?LQA}n~}BM
zXsbdlz$(~kJqy6^(mBs=M~QxIcMG@eV=8xT`2O@|shp{>vu(<9VzurCk6yo6&JzZ&
zFLg>E$P#hC9CSdg%7k}<S{@JskTwK1nl04G!F~g%^y+N={D#O|vb-cHVY>zwV)jlP
zqK%`MbQ8pM5hWrR%~*1YF8%Ty#FK%rU*KT~kK*-Imyd3oE9Dgs`vgUd$lZCxwX@ty
zx(@$GGU_+*pMmN*g~=~8O8#M4Xgfu5nP(|}2ZjZ*pWzm-*v?Lqb!L1eN*N61Et6u{
ze%Cf#Uwg3mMo$R<vf@Db>$WO6j%GMR!Y&K{ceajedTnxYxhJngPVlObn=hA5{(3eQ
zjps~9J|>|(e%=lH6SYBWb-|34Fx+ecj8(__c@tx*s#jnj?wE20{zONA3w(SR9rwyN
zs&gldp(;TN=f&ykEvh&!GcXM;ydg$dX|?GYL6Cbap7Edbj$ccx_#$%fWfQnq+>EdG
zLey}qq&bV{FzH-x&+2RF%Gvxpcc&IET@k}0aEV&n$ZE&D>W?a?pUF)}O}qqdP(I`S
z>QTqkq=yEarWYAxfVKpMLK^dkd|Ot$Hr;%=J3@F9vKw>1*;@0)gNe1VR7Gw9TCsly
zcZ1;brs8ddHmqL?Ke%X>ttMBXer4uS#*D;`{d(rlk&af_$w}wr@DR5CvhyvC+AunZ
zfLDb4^w`MWn6BwH-&heY6_qK<qu+IjF7>J)QD}9_QJVMTIt<=yuKn3nAd*~0hLLM_
zK*DY5lB8dq&3Eu}R=B?*!N@#i+sEQ!CH<69m?bIu%%(nHVzkDCWtXf6?lPC7O3%)n
z5u#H%$iMrB)P$wVhq5*YAbM((9xhEvPrm@f(=**~_wY*zi_Fh+9INaTn`0jD*JzMK
zMu57tjQ92s9CyOG+;#H%E#R2;MQey<;*PJFIaA3c$^TO`&4K&htX270NW6fw(|4gt
zOxeH`$kQ5fR(8!7@OU@Zv5@nG{ZG+(XCuo>CSJq?UofP~HfEJ&&Vr_6)=|@!{Tr}A
zKK`t^Ah7VA|KrO}^E)qd2M_nOIEwpbOABMwVdbD;gWNS%;=aGAhwy22?DY()s#GmD
zh6U)l70hp+lg1IQ&2qZ!)j0wua?>n2helGnxN*QvfcDO2N6qqiH|PglI_i0K8N8%*
zTs8~d=DIu5*q(G)3wYV{qNSYf%zswT$q!)^4H*<}#W5)Y4g1-0p6d^%9Fh>x>hd1W
zu(74u8AZgX(k+xy4v;V5uZ@-^=Wx$$k8un7fnRfh6e_rm)I_}8_Ts~U=0&lWe>IsL
z0}nD$Ss0}e!w^6(lz$t!Gv6o=WuB>m`jLO9$t#vAHeQphCs&(n;~O>rjnz8K53vXY
z&Ry>;zKUO#i=M|O!)O)6rnP<6@9MCDZwx65eGX;0eFZx!Tl|@ba%dafImKTBk3eU*
z8cVnpo6%}2mnGa+l<f=JVv*>6Tv~fIbl87d$_~b1L(@&s8vKLsu6@}ZiBT~}KXr$l
zCZ2lAQLdNb&jmBN#E7Vsu-w$~kW<;URFm#hz+M<Gipo6e(|vm6Lxqd1GAZLPuZppu
zSEf??>UiLDd<w(8A_W=osS7`fCR0NM%@5jd1izu~I&}%U*!vnXPr^c}LyPocQV>qm
zzbo1DdQ_!Q+OAoG#ZnTEDlhBopC-rLkO=)v4p^J2{MglQe2E}c!LGL5a`Tc9P-4_1
z3KQ_<=2W%7R^$K7gN`LL<%v6#KX@T!wcz>|DyFTLD3hF$f{94bVKJkGDU7eBF@S>0
zzrFo3)2BJ<1_G4L8NTcxBlmsdZP^;gH-H{2VzLbHPSp!5;3en<)U~Um)H(K=U+d67
zuM2o_y1bJlHe-9ZpMdOuH|F|uAlpnBM~HmPTe#RKsrqF&^*4=eqXfytVx+wP+Fi=G
zV^{_tM9WN<8(91-p9-f!Dj4J5@2R4oHi6^B3BplY=6l}!i*aUJ`YX(9K#t5Xp50*z
z<%5+9fHI|e9nmiZ7oVB2loZ;0fV*1f<VTP-=Tot3$7t;ULg+6Hr40Ds8O>C5VTE)P
z7WO$1NZBHMN%shAgayoa2>-6p#n=A4lvUJmO?XeWjd!CF)%B)06)WlNi)QBQ(UfGN
zM!AxqAe}gUSNW*7(F-%t93w%+Iiorhdgf5=El1O?Aev1(Qs&+evZx8a=Kr+t3CVkQ
zYLW$nl=wsy!};DM=$NOung(m37q<L6*nAFf*k;Ud2A-MN&nnC<(Ehj~7o!c?@9y!!
zlMF>iRl0WVB?O~p=0Cvbv=}u~Ieg&y)vnCY6`bii3yr7+jY+Zm^p^=boY6SsqiT9}
zvfHD^jq#+|YrpLHkG?1<p!k|6d7D15hzKZZikve=-oycQkU&8p$vwrowr44jH#v9!
z1zxyaKWt!JgB5b@23T|ft@ort&vjQjUW^<0My<Xp_Mxt5{j0$JVg;dut;yxG`KhEK
ze2H!W&c-RRAain-ntt<+P;o%v(O-8MH`Z>%pYOn03H>(MvYrCc50O`~9-q-vd^b9@
z^n_O)deDk};HPE7SRXdn@=GVhxA&{~0PaNX9eBqshu?sA_nVhxfX#?LtuU}{geZ-<
z_ObexFZgxuqp+gy|1QwGAE^KaeHo?3MSU-*v?HJ6jw#j;dQ0f8KRS8}rYL`sGEyIg
z#n-f1@D9fpAaW>dluD^n>B})Ag`1tr&6lz&(OhApXvjXZ3x&g{+90g#Y*G>N`$u}D
zwMjNE%zO<cXrw*`5)h9J3X`pC+w+O54Mxe%4@h0_8#glDHzXkWJWD<8ybWY1*1jJ*
zSg1SnAgvJ?y=ull7VPtC#~;@NCktfcw4POb8_=|En>32-@7RW&eW1%xQH%q+ynIIY
zA-!b16~1e%ELkjmJPG+c>Lot1$yKs++)!w5@gnkkIp-Bk?hsnWIoCx$=Y6YCAj{0p
zF>uac*Gcs-D8<qu>Z)dDR#YwSs-*CWw85QOYo22z@_8b#BQPOW=IF|?vi$Q8mSz<M
zQtx6Nn7kX_xAXs<T8ZP<(pry2Gyxi1^g%br^$k(m8J<`>azLV7^4c|U$c2GxpI(*|
ziuNkcm#+I0-OS|rjBJc>u9edwPOsC(2%`Ur0oySGni~-KMWcXsh>~>=A#Rl)?Gx>Y
zJG|wE=(^0ex8J(#S%2-#uy!}k575Ol8K++whij>8AMknDhk0x3xjk8eZ>K+({pc0S
zKf~RG6!FR?YNWZYX-uW7&KLT7^N;xjUlRX5en2QT#WtKMg`YZ|3O!sYF-Vjd<;*zb
zZw!BGN~4|?>;<QRKitVFOu1?Iw|?m^iIGvp%ae0_`D!y+^Eb!Tou(Tu1N~5;h=B0-
zoD4MUXYe#m+J}cM_Vwu?0bZFF1pUu@)}@}I%YGI{O&j@?ChA|M<doF2BC^IA#Gwq8
zZwda>!Q)eB3sz4aTtP*dsuL0$BerTfWjB&{7xQVWpHS3fXvDkdkeDZ%$pH?6hLx=q
zUrUb*c!RD>H8Yzzqq#mqK3txv;-v!yT37E_k<5xk!eac7u>Jg|zcF5j-cx9O8pO?1
ziw|IP{F>jED`V<|j>D;dM;Mx#xBM(w#`-efQo0#I1+`@yi*m6udt+am*Z+8BUc_kR
zSm$x@j8&l)_6bsBY*ZpyPu^j+^As@i*Y5kk=qwgh_nPmZc&<>(V(V4!V%}&bn+@Sd
z0?-do3RM-j5i3u^*{1ImFCOjO=b1;^WbloBxn9Ze5Rw|n{mY^rxcC&2?1mHrL!)&A
zK7q^KT*FAtBdTiw-R`@?=6wTMZNBU`Yy;oig;AM4Oe!L%sFeKffg>8<Sj3{Umo><n
z`&P?%OMg)11C^x|YU-|#CMAeEJfBT|xRp7xFtrz-dU+GZ#_}VvX;)+s+;&+qNT-Z9
z-bD-(W^wdBohJUNgdiZ^oq?z+_qDRLGBr#w2bxJXAcW1l`YKOEr~0Y<o;@k9-O^OT
zwvHjfj4CBrr6opDnTLGB)Z|^d^eb>$$<=-1McgWB(4~dOOSq{=SZvFy<EpwS6;DRW
zd;IqwZznm=X;jL);EnI7ne&xanvUm)hdlMom2oz6$^4{%Nwttw4k+u356ihT)8fi-
z*OVgY1c*l=_BdOEDMZsL<A2*wjmwUDD5^sLnk4(ya2CaSKZ<A+TojMF>NhYO?yv~(
zDNVAJ9URN9S;Vu1Hwx-ej!>#>vdv7jO2p0OXXINvLM)xI0YlUVya~?mo9~rZB(8I0
zMrqp~B1dlMgO=u}^~7D3cKfP>R(;vkQ`uXI6T_^l7huKW>NLl)XX0KJD;gMCF~WO5
z*PfR-*VwJvQt?aIyw8*7di;N3eOuA3{XOIuaBg@4gz!`+g+8w;Kdn}av=_IV%|B65
zJ06C2g~`HKB$ZEE<G2#v2UNTCJ_V`iYa6T4reH&JH|qpT?ISEDHybK@*_?;HkNZ7G
z*}t;NH<#wIq;&8OYDLK6;v$LX+S*-eclD<4>+^>}=NquOdL~>6_*7=hpl@csOi_Wr
zR<wS;!)KPiX{7PeOYqTGmgBfhat~}@IV&xizejR01cZvnc#KJW2R7<kl^b`=y<7$*
zlsDDK)#nOtfYpxmm}ma(o3X<0c@r_{E=F_POuj#V!>>n&#5of0I3TArxQBY>8>1E@
zROTwM8z_;5)@o2vkQbLMh+=NeLRM=ygl}OE5TyPhv1m=xv9OHdfWO)!AAfSc?s6qL
zyIih7+#2_zPc6oyb<Jju$0k!#(D@%$dALBNN|h+A4|zN5pz^&y;wEa!kqC^?Tl|Ty
z7I$55ECcrA<{sC%%WkK}EeXd=wXNKA?6RDjYw;L4eZOoc6UdmesWMIcwBz_Hl0gJf
zi!bUo$8H0RwbQ89Wj&<2y$Mvtda&EG(Td51#S1Ik)MiBKw*$6szuPm1&0uRXt6iBp
zr3oRLX90vbO-d5qZ7N4^-~O4ZOdoiB#M;du%;?DgidBd()iLVHkP`+ynCs2y5HaQV
zEepH)U|G;W#;O8-GHK!04~Xtsf%g|%E#G>^DQ8bd1<Iu8B8JPYJt=dE$^Jw<@}i>V
zZAD{y6pSG~Ni4oV%iPrebG?5iEPj4$=+;(AlX0nQAYRpGgg{X(G9#i~f@`z2zJFmG
zR>)})@Rx3vVh~tjUASxWcC;*wGNYaHv7jQ-*O11hB7hC&@i>yb16QhN@b>;)sHk-$
zzZ9SD$bQWeRr@-DnI}Y6<f@#IbbIiC&G2X>_Q~v@)Avw9eJ!;y5y=#Psb6j}BxJyL
zC*q%DTcCV#c>pOgt=^&#$5lXYT1+N9?whe8FYC;wP(qXKwy&Ol%n+B1N#VQ}_{Lwq
zP*6V-P#k)y{`5F5@2c_{=YQv0gy~wJ9EY*kyz~Aqx*tX0O)*N0p!C>>W={kINMk3d
z{Hk*U^#e5`A;2u8Q|1Lu`ZQGXNX8BityqD<`CEhORs=cbm+ZT^$34AjT$9|=G@M<~
z(+bEB^QzKY1>o`T)~^+4hdn)M3kTr-RVqyyN}OfB=3kc4x699{>}kPOhsrR2x__ly
zdu_upocmRIBL4P64)vmy&UZ=2p$^TbJ-z<`tw^U~1Y{pf&_gHQ=RMDQ6>=~!k3-s-
zDOu=RisL;wKA5SQW0^=iA5l~S0DFT|D={48ip|HWIw4Wo(iN??^~n_phbQGZ$79y5
zti>_8fnRz@SY=S?_#?2PNv*<Ai&_<sGByY`av{hgAo_}~p@R&PN1^7fM1YKs)`_sD
zhjM=Mf54agC<qA1IUkX!03)?L1aDAJaaml_#9>%<=j&6(y!uh)N*5|ak6}uM9PK<e
zp`^y0iIhge=LbHyr+u98V{OCJp`-{w>Ce`rP|8L)tlyAx5s}UvNc<^6@^+uY6=`G#
z<#pzt_H~hRI&~cMsphug=QE{ToDs+AQ3!}!05P7Un$nWogYyjk08hrF*s6Y3=BZCW
z=1ic-aq2xqS1E#Wy!&u#RG?roFnvI(@}T8$lkPBT<00J4Df1Eu{AomP!=0pkYSD}i
zgCo%KieD%mhii7otuAKLDjQIFT&eb`{>?HHLEJqL6|n}Je>Ob3b|XDlZ6BGf2s}3-
zAHKR(K9P;z&lLIWVL4M<vz?AVmy{^ql`$=4mB@h}M$bf2Nc^j}gG91~eCX3RtcQ=t
zVy{OUM1dnw8h>>L(fU;TLpfp<trO1Vf?Ih!x#M(y?#Q@5tp*it*!owZNP<)-;axwv
z!*Km;I)4w?&&ne}ePV7uBgGu!h9)~n9AA*bo*Ue8NwI;(Ptey#KZb6TCK;lUaop^0
z@cnB#;>K%SeWD2vb}l|s`1GgDbV5*{!p!w0PD=hFrdyd5{H%W-YF{EY{v01;O#3S{
z{G=aa#ZA-P-qK4$N(n@ZhK(CN!t+mcjN}eJpW#f32Ux)%jyjP}akz8TRxa1j6V#_P
z0JqA*s?QXP0UwyFGlo-<`WmS$@1BD{TvoBT<l|ztKyR6O_32xhEUX;ijxpbw%2`97
zm|*%E*V9!pNFY&F`?h38D_b+CwUc%}QTSF~wn6bGpXs(g{S4NPlAHsaj@;I6x&Hto
z9gaV{AO41G&5_xZFZQ{Af`9r+r#hZ+)z0cBWsr~#KRVmFeZ5H)&t1%X(nI7{^b@(k
z1!O}U;J5E0&-=Bak8II(EA<%tYLqNlcQkvG`@m+OHOxUUmhMlfJ4g7}OIOhn<0kYT
z(3Uwz4fHvrRFQv#5z~s!Ei0Ujh<&JWI}AL|!`zyF%0{aFi+64NpuP80v0gzODz75s
zC<I`WS&ExY1|~Tm9(}7NOZSaF$n@)TJhqmfYG0Bg)UINbe4Dp>4*jdzG>bH`((Npw
z0a7T!{y5~H$2H>-*h_JJZh)>DR0JRQg?oq?CmyvJ$I7D>xBM~lIG%2Ac?!dlJu5R&
z1g}mjNwNXOWnDY5E89Hgy#A*%IPV$QNj94m^ePOIFbofMJRiv7xP^v7KYAki0bZ%C
zuI~+BDPQ?#xO-rV^OF{9*1=Bn?b)7&798-qD5L@>-w}A*);8LIO11X8qBhAXnSCx4
zN&f($3VDYVg#fO*rgS<H>{FWFDVUamCI0|%c8}7cTPV=w42XZ(aw|h%?MIi}AC*#*
zCD4WMV!Ud`LAm$*z~#|N?cW*gR-|IAtfhe&!32LIIi`q+U~$H2@+RziS1n6A@cEwJ
z5*EtquQ>jMNfGZ>qfPMiIifT@4^G@wyI8R0ao4w{5|QUZDHJ7uu-pL1>M8>d1Y?oY
z-nQD?57UEH<g;MhDd5o>fhZ@bjU-1PjPu-fBCN;rpmH#C$m{j1rpVv|FbzH{KnYXd
zkH)FZG-nuD+|SvxlgL)~q2Tirf;x_CQLTd@4c8f^@(94dA5N7%(7si(G5*fV!#|&`
zOt$-mF@xMziGv&w^!KC@1Hc^6>Rd_*5J4GI0jo`DALbb6s6FUFGsw$lk?U6@QGtd8
z8p=|8n)I=h)?g2lk7H5!ImgTR9xG}{GBUk;RaqkrFh&@Dl;cw5@}ob=0CygggcUsg
zwR6g1r;XU8`#g#Z1sKO5n!aIJqag(dZ`P<WC~&-v;MSGQSp+hM9)wnF<~A5mQ$L5}
zTEfOvE{P(OBZ{4oBpGhLwOk;V=Zx%e)oMuQ!NZNG)7R-lmtn!_W+axU91M?7TB>0x
z*#L}otu&Md8NpLhOA4^a0Mb)yamtKkswhHXb=fMPucc1*>yCg7broJj<blHuht`O$
z9R}7IVf-h(Hk;KFq?VFHWQCOT$f&{;IUNsLzNIB!mj~5NA7|a2y!#%s)a|1QQBPD?
ziwrT2{W?{s;UkcEG{v_d{W}`B7$c5&_o<WY3O3wm4V*YXTvPTYcy3Q@Rj9$oJa_5E
zP2{VoV22~qt!CsV%J82kgSQ~{=B@cLT(3@@K^)Ui5^y%|^kIhlx>Lk^PavuKf1Fe@
z9EDp!9}2_|?zbcV0IgDepN{_khcxJngZP7wexj|e4l|TJh^(7rIoPN6a-)u(gyd6}
z-9|Q(p4Fz11|I&Rr8aG}?nS}wc%`e@&DF7)hg=deG1PNVO$s^r(`oe;sMgW1S$OnS
z&+F2oQ4AR=9^!b*{NJZf(wd6}StBd#b2jfSOvBK16)NbL5`FTKj{DC)kj-kxd~y;<
zTqx*3{{Y!);xb6TBPse;sSzwp`XeSC2H6f|c_luX+CRpx#b<ABN0kg{M|@=deQI`G
zo<CYZ2s{i5qX|@$w_;$Ep1)H-9Cap?E<I@62S5n*6o>$$rUfqEX#*D=U=Mz1$M>**
z-lhWa^`*(jN<u>8wt8li6ONpE^q@h3?Vrw$nHlTY(T+Ip>BT2*0Q@Kdl-gB{*(TgZ
z$M~c9Pvk0-czHqH4Z-?Kzu{XrIXOIf3S2K41NqQx8ucTyF+Rt0Y@a-BxBJ78Y6ewq
zN{^`Gx-9L<UOybuB!W4C5<;#1*F#yjb~cVJ{iJd?F(^3<NBhg4*0Qb<^N@kR-Twdz
z>~3^8=H@AyBk1e6ALMHi??Kez1(pFakK%#1AIpRKR<O|N_VlK$a|K)upsDsa{S9i^
zk_f>W_Ts7)D*jx93=TFr;QmIg+N!GojC~DNN1>!5&W^#G1Cl-JmaxS5g2=Zx3I710
zn$WNqLD%xC8rqmVLu6$Gg+^d|eCMrs(>)BQ{q9Tqlm7tHO(6^3I;3%uSl}Oe)wJ-n
zj3i4uX>&JUpYAWvclw%566OgmCUYTYSoi>R1b}N~T*KYJO7Fr_T)fQi>r|S2h>X~w
z02EvfgKq+v{gG_{0D9gp`{Y%zbr1aX{OUn*4|x{5Y>sYvi(^0g7R-OktVjL=DUG05
zG3L@gzzWp!qU|F7bZ<ld04*2uqm>7>sJg$z;yhiYT3G2(O9TNN&J>a9wZFo&H17>q
zT<Lalygxg@jK|cK2Q_oWI&_+@kL6mGX`_*3RLC2;f-*?Q2S0^i!oD-Nf;b|=#~OrH
z%qIX04z<ZIa<hD`rk2Oi&|g~1p-wfU8AeiXncGH{jp)27V;rd}h761tbKG-V!;xG_
zZyb@s56%h){3l=M$gUh?QfZ{usm)U<oc=sbKl}^+0opmjlUY-e&5qU1x$(X*$B+AC
zRiFM6@8bUe+EvG6FYHhHh^PMmfq%d{sclyF-b+T|Bu%FWwPd$~tj|V}ANQ*^ANWi>
z!R3AL`(swW;<`Tn0Qcg(93+%r{nqv9Z49n-)uV~0{{Vr1z#S)oAaU}~DxaV=PG19D
zFZ!#basJX^{{T9&M~;0z^hFr)&-wBztJ<&Rm)vNuHGk+Pm;MF+007T#2VXDoMt?u>
zrXLC44vQb>T6=tR>Hh$tDK>cE_8<8Im$m-vzT*C;Q~rO_{{X;@i;oRoL2WF~skSsO
zaNU5-XX)Bq#-t_k6L*)5+>ZJG01D`D7sevS<LyKJAIfhx{ZHJ-Jvr&=SG)sgKEZi%
zSRXP`9^7==GoSo>^sLk8uN@lSbGBE)<#n8=8C}QWcV9D?TVCW~bgMB%x3);B<Bu{G
zJ;wz5Q@|U>K<D|^PFFmBVH!V^A#!OXgClqxV+N%I2L#lJ1_(V5T8r2@jMs}J-l8dx
zao?J|C<;i($oH!3xNMG{MOtNCi~<;Erg`+{o#o|7$KCWFl{+4wbJSBqFaxL_)C`Yi
zK}_&!Fr<9LuS5N6UO~y{J?RcqmH?65RSbxMfWx&)jhvCssHoUuC3#c&QZD2<6v{6~
z=Qz*fQC-{lqjH=#Vk#oX8-{UHMJXTyii<U>3l!p*f>F3(Ru%H2`BQH#^ZUg=km*$f
z#YtA+{{T9>DJ!5uuA}2}j0|!;D>79Lll)!y>spN=W&7Q#v%n<I-oBM^cHAW!qcFxn
z-h}7%r|(%;X;HZL^{JXRMl~6WB$fPE*0#OOIa=6izm&lw<qtx8dm6tRU}LpV+m+jl
z<Bg}U;Zi{%gmOzK>M2QFs7saGp=MY!$mvyNhj$Izq4cd7*;sYR_Mz>=bf{9Xte~1R
z04l&ToQ{MXRj6dk0C@M!Iio5t!;aJ}TzZ34+GbkBzZ7dGHxKv_sg0gT2ai!z?RP01
z#cbO~CLl%$ZlnR4##c{5Cs7%~22<;g&ZbPSAP#uWD`7Ob7-fRSJs9AR!lW8)s^G~Z
zF~{)ywCvvJEUU!J3bHTE1}bRnCXjyWGb(yxIIRR|q98$JrMokH-_ER=t{WSd1KyR9
zjH<qfqPkq4c$JISCnSDUtd@4)enxUT4nI%+y<5Y#-(!zT7To~L4Eq`^x+5nx_hm1)
z1#Cv4M`N6S_34J-NKWIy=-acJzVX62D2hFXDxAJwC08A?DLn?P$jj|fgSZp?R0-6!
z-0e8vdekyVfP9=2->zwa$!r{hjyCnAI~Gf4oE06;DkmsD>mA?U83*&GuN)_Czc)0Y
z1A~|Ox{`UO79@*MoDi%8<M31Y^fh`bsOBM8D&O2;gZ_S%NmXJ8C0iVPqJ%zbk`;0E
z$u&um(q@*@Pd}GhHshRrB<B@LOWTYFZMo?uPimEKkc>>FIQ#hAzx{8@nLMnYB$3DE
zMls*=rCqWv^&!;v9D_(lZoa_cfdKh={d<}ZUUB&KqZ@yVZ?Nx1><oAM=M(@GC*MA%
zf(Cl$>BS*D4Dt1(+s}XHNC)mG00BVz*rtGR0`%+$6dqHY0o>36lm7tfALC2LGyN(D
zkt0p+LZLXzai3s2{uJOQm!y7D_cHR|f`k76p%L_>Rs)QIS8)Ra__KjbCFMCmg~txS
zh5Wjo(xymbLKu@Hj*R@{=sFL;REG?A78vjU0PCiZT&U<BOj978Sm61p4u7EhF;w41
zvS-X*ZNEJS%)gQU02<f|F}ncd`cgWwDLpoh02DIeQc-^Ev0BX{)2-U#Spx+vvkSo5
zGR==+`jK3P?x7{-pqDOy6~@%-fDS<S`qxq7M=(Iv!2R8p`Ss(>f6VRttC5%fJ`<;J
z=U7!qD|w$ukVVS0s!Mknf9WR1o#Gd<y%T+$ZNWzi)T#9M>G@Yf{k3#aWsWtP2N_2L
zgZ>BFxY7s*Abl}c^zBl8GjK!_M*U%9fXAT6q3xRKrBO#thYaaO`^_fKka&W8eAc7z
zr+>mge~!da4A5VzwW}<QhY~{HeF!)|QSK=)JE*Rt?sKI}`9=QUwm9EpLH5Nxb=w9!
z+c^4)hiYWybLqu9$%K#fYoB38qj06!{p8=<I(IpWPry?@we+*pNWXy<3AcrhD5vzK
zHZdRJC(|6z+XV_fe|cs-M@o0XPUz#2mLPFjmfEB<sWD9)so0|ee=5NH1-Uq61a|~t
zv+i`6;(|eJra*91DFOKHk}H=HPueY2%&OCbz8n0`%1P0`i}T{CPbvZThv!_Lp0{ay
ziqqXjazT#eSR;NTAXjCk+9k^pVk2n#*j5$idj-y*p8iKH;Gxv2^N67mo0Ya*z<IBX
z0(*1oO<V6U;WcYl`ZE6jjUFzz(X8!lthI-i$(@M{2wloop(E3tzP0C9dVQH@dnq1B
zx`5c-iuEYcR3plCVX6B_U4Dn9`O>Ha-^KyxcMgAAa@<S*06);5sTJftT8;)W=vNeZ
zLqB)&t?p^;SM-tWLFZw`$`kBq!|g|<!2GjbIkksE%TL`eXFr1xMaAqF^pWb?uiC${
z@3%^%eV$B>y|$Xk@L-(i(+HRa`$K1s_Exw-J;NwgmSVnz$rU0^G-nY{BxBKBiszI4
z{CyegQKSC=33X$Cw;rBM=}080$yUxWPi5SphX=T%VVsO(xb~+nYU4PN$I6U`BivMB
zv+}v;>sA?X4te&f(yjr=IOKa!ibEJ3`#W}}bj)CmDZ?y7dtlSt0-=3qxoD~e20bYX
zobl41CkC1uJ-byDkiar&Iv#rGgHC4WBAVc`WOb%eL{FIC(>*|>;2a!~dSkUAlX1fw
zam6u+#>4m;s}zv%b{=^h>A>w^GuQ5u-`cD1B;fV!?@m_A?LT+gor<xbjvaaXr>Ly^
zxO}tz&tp!CMoBgvaz8q-aWrIg^{HO#7b%t`$I4IRO0AaSopWqcp~y7?608-t^fk7P
zXieI7W!<YP;EsK9Rod9EI6q3U7hLtL3IiRafA#7wVwz0Lk}b!P^L9AvQH3G7AEk5)
zqTSrD?x0{ly#D|w{c6lU6n6grkDH}m_G4YdekUKwt}?ngs#c{9<a2SUjFOovo>s5h
zhM0j8v!6v7uAbjO)2+bFEi-z|+rLf)X)o@;2o)F6MhE06HFKU==&c}*ay<^_WyG-v
z)c*j_LxMjJwIX<S2w0i#aJk`Qk0kzQ`d0q{C$$9wDN~K7s2tFhwmD~usV^fcD_u4Y
zBDHB-xtQlahXeGhKWUVWu?3BPx{3h(DI~OLveD&}Ju#9hBKgSlsco6J&938*nn~RI
zd*+_1y$%|n-b-x?7<vx$gtkr)1MkQ6sS6j!mZ~`e+LO(|>OU%=0|AqQJu!h#&Uypy
z#XF0Xo$fiu10C~<iKoVXeENY<9pfaNW2Qw%`?lOe1|3hQ<x5ehqDPp%KyE56p~1`K
zdLGo$7bFvpaJ1r}0rDN7cjVHsLrh9JL%W`;Ii`)pe&`%@0;bv<?_eK8jy*9?k+H^o
zfdZIZq)@gPa0j+3O{xxAk3ZcRH2tU)6u~@u3TqrF2OT;9X@IKAxFMSzfhRwZseaPn
z?NlR<yKzuH6>=GrpYDv(`HXtFE7SmUieX|$pKt&%XB|Ko=}`$GbGAfgC$0@Nsblh;
z#Pi6-OXadJDS`CG0c42CRDUwwMug*>HZ%1d{{R}X2Bjc7ZX(_X{IpI#@!$TvMwX?>
zd>o3k`?94OTocrV#%bJ_V#HI!G5ynmagwKkI*y%tRD=Kk1MmW=IzmR#5)uyt4nh9_
zKJ?){pnS4M2647u@f`mEIta0Aj12xcq&UYweR45O4<jI9Zy_Y$9Dt|aIq6Oo&nO>g
zm}CBV!bl&Bbw7qE<+!#Zc_TyR^3VOmT%X6#dzw{_X_qoG#=UU3X8igOz#pwlf;iO1
zh^nf3C;)TqPZ-_us5tA|orQ8JVMLXrjZXI9yJeXEC!BsDRHrIN=Kvmn_o1W!0rG%4
zoK#G3?e~Ela<rJqNM-~K4m#tSa;Zfe=iREZ%&tCH3~|s7zfNjUNpth(ryz4c5@a5{
zp65L&%84b+34k|Dne{yX0QIUp-Lb&;r$(UMTe5IgReYY{9GV+TaEjE+*Bm*PL(lqb
zfc`2m{HvLt2KpaQm-*LoeQ#|6)Rl(jS6hq|+o8wc)yUsTZu-rmMhBKQZvOzjkMyoe
zt*t+;+4UI=?@^`vvi|_mMU!9$kaN<imsF8tbHcikeSytw+_i&a00UKC+Qv+&zyx*n
zs*Ou-j!%gh7Y!|@r8tg8Ng|!VWH=y(HN9ct%;7aSMBTjju(EzVnyczIcGAd~L4otr
z<~hgCzbk(rf8bR_*;}vik)K-WRg&1rL!K*`?QLbV)JOUi%yTv~<+sY?{sZ6er-=43
zR#F&s2Dqr~que&iLl5Exwykx0+fAt~$#)R`3C8Sw8>p=oCOa}EhnLxZ*QvS{a6tWe
zru?dT;ClL0mYTMcbO4$&ZvKpSFXvj(+F4GZ#L>vV?6q$f8GJ0{oZUSpnlaUskG>D4
zMLYy6)ABW_+HI)elOIt@{g-Th>HLj8Rzs@>Wt1G@$o3;NN4WvP9RC1x;<S#HA88zZ
zG{dBx(`V`^<zek&)vjb**;>#3zS0IH<g>&HKaD^%%OUq|EyjPkxUJ9ZslSF=VbbS4
zT@R*3Cl*|4#ca$!>@7E+^f<@U{{YufpAKn^+}i>4aw~WHGT?Fo{sx!qS3uw@pRE=v
z+kbfHo5MO`?J^v8b4mXI2(FyNhMPa{*F^TWBcC82p{R^g54{5q!!%fGte;bwyzsrG
z7IV)WvgSD1z$`lgeJejm(1xjE^LfsNWGR54j)4CFTI%&ABTBr?Oh0h{0Qw26zX^ZH
zDqp?+y!0RJt#iqK{yvQL=v0^XT~~j%AIXyqI{A+llNa;}`PF%1Xk4s{$lcY34R;hw
zi8Z5ns;|fK4AxGLnlr<WF{i3Hc43Y)>s6#XTY=vddr?G==&C4Do{BM9^M9%*zZHT#
zC^Xf^9D|NVO+S9+2e{zWPtg5pPvddhbyGF4**O4@O0Br_lU7^(U*lDW-KL93TvBJ{
z&UiG3{aaGM`i1-{m14dL+{=Oi%`umF1xOvnDeM0L9`b!D2i+f!^Q##V!>P#UzqJVC
zm0^}W{eSxO@A+zeqM|+hh^iAG!vGIl@M=h;DtercUiA!|H{vU&@XfRm&KgK#b_0UQ
z2_0y)^fIkkQiGFMGUKzjydSx@k;kZBKR%T_eiOW$kt@XtAMYK_`491~#fi=`a4CoV
zW-8P<PcnIhNh?Jq_?VX77qhof=TAMu4{(F>9R9Vb4Yj?u`j{h+9m4bZ^{G#46b;9U
zI-IhrQfuCAfh6<S-kMO6%{{m#kaVETX5Xeg=*9<sz*L8;{b|3%Xc*Enk@}w0tAWOI
zpL#orQ}-wk-jxK_sV-eWSv&e4tyxjW<59Ok^c0xWv6&~C6Krgu$4^?1=VOp?Pp(L<
zOZ6XOkl<txM;#4hp*zPWp{p9j-CU7VobpfC6lE|+Qa>N(O@5z+B6L2(k7`2)QdjPc
zr~BOgl!87p#(ykkr&GZ{?NcWtY{4TfiUQ<o4gmlHI`c@|^PFX|-1<<&fZU#+(vSd9
zk^wt={xl3ixfsq5(wAvqRFJAV0y<KUxDR~dm4+OS)Bw@2<RLEIGoR~CD$I749C{v<
zmjhxp-Y~tt8jqtKae>?EOa*wr1Iru^=9;cw9JbJUjt8wMC5YTf<GngDx>N^|gTbI+
zv9q|qJa7o@RwPpyBocVZC%4j{H_Aph<aGRL(*FReG!WSwasn_M<FCvB<JzTG4y@Zr
z9q>B-bya`>U;&)x9r&v-5<tm4s6iwwP{}_l8uP|Zf1O@fQYFj$>^(;V=~K(YU~mU&
zhCW_jY-8Js)0HD9CdRuKz`-Q;Zqx!s1A@F{bo?o0`Bezum%+z+XYnZY6oCq)s097N
z*o;s<Z@k|4^{U&5<Pr)00PEE4{;d6frAX{Ki2xPro)1cat)52&hTGXx<DXMcLG!Wq
zKg>}J5*s9*C;?J25ho`&!N>T}BOnq6GCMJ-_cs3kym-fddYQ=_<mdkYu4xQaDUb==
zM?s!xvlout+%_3ejx(Oenw?Yx-~so0RcQ$}cR3{HHvUwkuNFFg8uc@E%jamc&1DyK
zR}h${Uva}J{cDl9^JA2#i*of=-Z@jy*F=7Ahqqg~*{F@V{v3gvdR72Av$Q9Zi3dJ`
z&MQg6FLk-;VKEhO^*O0KHFf=c<Q`+=CKmbvKgN$V{{YV!Gx&dvKfM3~PXe>{6#E{x
z_SQfCetrx5g@-daO|Fbol`Jp|0gwspo_Y1hts<<atYJatma8*x*c~uGkf@I-ANz~^
zsjNLd#>+LwC#_Gxe~_Xs6te-5o;juRlRxVZ{qz3-*Pwr^U-%dErmx-<{;f~3f7<xV
ze1C%f03uHzcB=_K^5^dK_MkweLGwEG8+sa1{-r<OZ|6dH{{VaX3O=t-u>SzH@t@E5
zFY*twFZ}iY0LOp&^rF=ZW6eB+kC(6OLFy^P{cL~lieFc!*gv)LpU?O&@&RcP{{WWl
z{{Vjf0Q&VHu!Mkq(s>#G09XG2*QSm$k6Ms_s~_(l=SS7)_8<1PGx`4j1^z;=oosRq
z<AMf%)c*kXRU&CtY;biFKkbkI0A*2R`GegX`BQqHX!aEQ6aA}<{(r%Lk$p59C;tGL
zr~W|y0PLz|@XebK`3qzI*njp_&)xjIuLs(w3yG8*O(6O)tri-S>G_=XGn{N)y`%f1
z{zqoM6t!WItB?Ef{{Y!ki{R^KAMzLf0Lg#;%DEV>t~UPwub2M&HDxZY0OEN^(ajGH
zNqwKpOE<^MYOn5(`5i#;O}Xi;ul@M{0PL%)(DeH~A5B|Dlt|l)yo3?BpKP3P0Q@V=
xrMkHLCVcsGp1aLd7x#cl1>E@9c=HWiI&|Rg6mny6SiDs~XIib+`z84w|JgcQm7)Lu

literal 0
HcmV?d00001

diff --git a/fluid/object_detection/images/COCO_val2014_000000000785.jpg b/fluid/object_detection/images/COCO_val2014_000000000785.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5107ace65f5aedd67ee81317f24c9186441bfdb5
GIT binary patch
literal 38022
zcmbTd2|SeT+dq7bkttb{QT9<}ExU|$$ZgHOv`C1`78$a~43e5Kb|w1|5-JKIWErU}
zW6M%@W8as-n0c@6`}sZ3?|J^u`+nZ{y?kbT=De<RIgaBzkL7zD7yT!F95`X5XQ&52
zAOHXX{{Z?3paUFbVuCRpISPZpjvYJ7%mQa+Iewgl=OiaPT!0rLD8S3lFC-!@CUo|^
zFh9Szvc!2=IYmW9gqVs3QeItJK~etCAdq9nj<FnP;bvv!mOsOPM*jcvMQ;Q+jzWqV
zuu#ZZfPn)7<$%yz00g+sBapu@;6E-11C;Ry6YS_Q=HuW4A5H)a5Ga&^5qjhZBO~~9
zD0m-W<T!HjjGQ(Tr^PMUSu|WeJR$d}@TGT6T$Vj+A_`7E5yzNMo#y7@JtrzAE+MI?
zbWs_pqN?+kuAaVup^=rf&6TUyuG>1_zH`^b)y>`afuDasU{G-6qo~KxPoClulaf<X
z)1JRb&m-g)yecdze*M0p^25if>Y7hqnp;}ii0xlHNWFdi1A{+@h9@Scre|i!bMp)9
z8=G6(JG+#<{XcX;0O&ttf#3fS_CM(20O?|2WP~!p{?G+s2mn7&4#p#A<d{xsTflCi
zInT<6ABA5^$bHvzOjyBkjmycWhxwF<;>5Z2KcxLl+5bDjBL06-_Me3P7hS^u3lsth
z56S_cfUOk?-(@}V(qpI3#)%Ucu$(v)uN|{3;0a%{bnKRs6(6}wvBZ}$@KiCS>uJ2U
zMdlo>wXpPrM}<`a9dbB-R_+|8$;3>Y3RN6Z*-*d(M{6B{J6dQlVcHdw>*rblF{QI+
zBAzKe?C*-^`Q_{+n*G~<0IsdYvd9!2cAN43_Oe?qSCCz~<Ad^E=kSNOo#XOPGNBOb
z#V2R%#?QYXiU5saXH%kmi}Kz%eooN;Ds*Q{eX>lqSlH31fW2tidMN5!pOsC@pwHV|
z3v6zEEE3{*SZ45Z5@%}!7)iM1Fy^Hu{;NeCBA$o)k5-&GkPrTlkZ=<R@^Jrg$H-P3
zBJrnF{_<sA;2hjS>zJXD8+fxIKZgM)4z?Fsnlo7F!hvK9UBJ4jAfHUaz1N*gOj4}L
zx%ES4!f+rB4rIs5*vh)DY8^SJHjgtF+yats2dCRvfHNx?!9GZj1p~%C=|@}PkN5-N
zj+Wyg7U5cL%Lv*hlgg=ZQcO3Lnb+j=_PGnPXIp__>nQCdiHrDPMg{mIXF5=BCb4Q@
zQv|72GP)7wlORDBabd*7+BXRlv)2PI5=nQhx%@O<BD(NjO0^$UJgkEWTvd|e;a<>S
zSGsb8`}S7|A*?e-zqcN+!%a;2ID2#ZHPM{PTVG3j>3D>&A#c4wbKG%I+BL`&2(t|P
zz%pkA`!3`nR0V{gXs=Bm{lxRWENv6@=sv5(q|=-&>WwuPRIM$l`(as4<H?!m28T{i
zRQ@;|-;=V_4@}t~25?<C6Ln4+WLzFj0s#5e_&0Y!egpacV(H)V`NR0XW%EaT|M}w|
zahT{UmN1?)9$$_<p5fQXgUmVk`AO(GeNWY!w#{g6wR5qz?Y5hEc#0*&{2uTf)4gy`
z_9Q4OhQw5m;YQ#t_*=w(rem%nWn>XH@|$v!t_V$alh8j&D~dHt;YX1<WE?Ut{F$Dw
zVWhr0BR}bGW~6>tGK;n@!q%3NJD)1yN_7ROtmiK1oL@;x?P2ju`~EAdK{7&)l5X-d
zzx<u@)IP~6lUN`MD`0#YdAaukw_v3cmQ4d?i((c>88LBdCI`%kGde{C&TBD+>hcIC
zhrxLPc?mXlI$&p^Kk0hn<yAV+Z(v~->>oO>3q30rz-TT1WvRecc6_JAE}&ql7Y^xq
zR8#3=D5(+6b^G}&-e=E4$Sxr+i|St*7G0b#{)o&RmiG+T#%yoMI`h8d-llTqLNFH;
z(fLX0=52<An`Hy)$4<wGuG6>@0z5bdDpwQqrnA*d6{75{)5>4j>LvUhZT3Q_sSjOB
z!W(A|6`6SaIK1b%G+JZLHF8sF;m|Z!;I`IWQ}QlMabap=pt8hI(6s*U8|0`LXLeC1
zt84S*FX3>_l0uh*uRht|l+@Q(K5x_RgkI&K1DZ2*0Nb#kJohGLT@O39$=6E<_@WR4
znRMWoXe1rbu2-f53{!M~S7qu~=p4qT{6G49qsAH;eWL@9(zNM7&hM9WAl!lu#3I%b
z=TKXGy_hu#R9jqNfI~3*>p?n@?L!9|9rk#WuQ*UeKAk>f+8U!Wo2?UQCvU=Nk-_(A
zN5m*SU4o0N+S|5tU~|QRa-0qbTP9YZCeC$Z=9FI1fpJ?*tYu%FXO3HU$>C?ID&I%A
zfxrAwme(Zl{%7%S|K69Sp?yw53R_L7c%?1z>_FHepdC1$y2KW;a+2#Xuw)<A;wfcW
zeW0{tR`6Y0+)E5+@RNO~=ZV$Q&wX$abYSo|;@{|u`-1xp(wTG#q;ud8orW~UY>-Tc
zE#BUSKV;(F*#76jaep}TcaPs7C;#-od@;<$bbFYBe8~U9%?Xg3nk5djQ1D3zX4<Xi
zIyk25e~y_%J85#97HJ#szjMa)-<I@$Gr=Em{kIAJYfXQo`!8wz*OdRKv>se>p^5~9
z(yCnj4{81Tn*JxzM*sVo=)nI2)x1&)w38bDNofDK8=z5(`50h6s>kORc$*Ef)xO&P
zs^E0q?arucQk@rM6bEM$l=OYF?o@s~oEyR@DV@n-ZDF^UjOneYc`>m&Hc>CvFh3x-
z$AU5Hd6Uh;Pr2KO@na4%ZQj39ZiK@yNWcGJD5JdhRy5v1_xkh7<jOxL;~c=)>ucot
zioxQKWjS{N4!O!hNch>)L=fD*#EFyDz!=%P$!WpHCH#BZ^Qqz|s)g%;Y+HrxoJ~^r
zvmbDl-GVl^qnFq}h@DO2&d-5r_SNxvxwda3GNGp#UYpQ?GndcMTF6^w>m}C(37fAI
zvnelaeMvkG8GiE8S*txX-S62(3VS3k#BW3<&z~o17`6%U7lL_d*|v;;a{-THHK+rh
z1TE`H_~I0_jxtI3gt{&+?pff2flF=*M%r7#nthJ@ynrzdNm!5w+{$4je{>~Nadvne
zZOs?dB8d2cgx+K@r;$#uIa6pmVnj6n(S!y!bgB1mWc2c9Mbf=<iQ0_CyzzW1EAbKf
zj^40osZTXsJD*<?Exb|dO?FqGF=-0BX74o90evO3({`l2$Cf7k%){z}bDs*8H;we7
zsZuxjXkqv1z-lEOm~_KT*t(8wZe!>GhLa9FEo)2TD(`ot1CmHgm9Na+p-eM1PMOB_
zDwYnQKpnHYj-sTaCT!>a=@VK=2RLq~N}%x;`hWLgUmpEN!(3hi)j|v%i1Nsz16}=e
z;BY6KY9dCJG6AP9Z&;ngtSb$Xhm+3YU-!_g5hFEeR9e~uO`l(rcW!8QmJZNpV&K4L
z2-ZsW$!sgCD4q^HDXaBfk?{Fkz2CEKi|U9x2#^OA7Kw^#pDGLEr}!UNA)iji%C&VJ
z3=wp>5iPB<gubkZ>hrD~BX9guKhf&FVXM*)4$!PMsHM|aL_&7Ub=FhsT^uiL&Gcqp
z?|XH)r0UMl{!?8&$Im?I_-IVli?U0T_i^bZUdLSuiNi`8gNS{e;{8&@3}R>Y4#<rH
z3MS#vhOKU-@*}Dh@2-jto*Hd6gkWBj*-<mm+cfUGHX+<M#VytwgHb=Q=`EmIJUV^T
zcBq|pat@Q>cqOc0lGW$8-2EvO>(Ii*dzx-%nFUmRYYdsawaND_%oZv!$)^HdY?b~I
zz*MsX>yn><fWwuLpGw^MfnF!Y9L~oF-tI`MEVfkH-YUgVY<N#o4;N5><b(S`33)vu
zf;aB}O$T&uf4}TY2N0)BQ8P*8*9n=0BV4~rw=%zb{v)u{J1hSprUUb(sJnr@3L1!p
zr*xnN{Lf4P)oFIU{y*}#8Q3R`2t{u>x-Ev@8{6n(FO?~=pjo5-OXlD%iL%JWbp0m$
zv!9-TO7y=slug`y+Ii|Pv2K1+nnhpFwmSk8rSHE)Ingj-?dq_(TSN!OTEOio{(p3z
z`fg@L2Oegx5x(1xG^{4k;GwqB8#%Pm?+xUw2KEi)e`f{izmF$XGpfNy+f&lp37Pr)
z7B?_742ysH;w{P_>#ee4mP1(R!=$jf1SQddG}6J~9)BTKGV?esY#VVv$)@Ux{kLBM
ze01Pv5^awVaKOIDzW#UL<Nrnq9hk75!~Azzta8Eg@Sn^nn-V*S-N(c-X0K#_x7c?7
zv#bA|6*m9Q3MsXT^Y6JoxITLqYb$Eq1!V3C1!u6gU(E-_N6w~cO8wA0xdNWxHUPB{
zoPog%ze_<{m9y!3f=98ZAg;w}<NMVC(HXDH4u$n|!FS!@6N3v9niySb7sX=JZx3U;
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/fluid/object_detection/images/COCO_val2014_000000000885.jpg b/fluid/object_detection/images/COCO_val2014_000000000885.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3aa5a741a31445454dfb3c3a8d2fa24948aca2c4
GIT binary patch
literal 29582
[... base85-encoded binary image data omitted (COCO_val2014_000000000885.jpg, literal 29582) ...]
z424eGjFyzBi?S-U@x22^D~=RVb|rirTxssTg)fkyACRgA(X*O{`Nu?cLPG>*2fPMn
z%r(U<oZn^3!AGUw7@>=j;T!|jecAAa1GmXv4)}vDcoWCDZ~j(o2$c%2B;*&T&5n{P
zj*L%owc#sU|H@jxGbv?%7vtn-Ra>U`VoSB4#XSY<b88jtjq=2|i#$1O5KHpK*#&Xh
zP7lkTu<tH!f<9?rCf00j?BE(7Q<ED?nDZQxQY)c#Nh@MtTz8Bf_(yG=rqNVRwAJt8
zJ>E!SjuNYWr-1>FJ#THPCAf;1a*olD8~#2PG&jbv3fK33W)*5*3|fObei-6=>#}8~
zmuInZC&w+zZE=soy|nUH*(p{a0WT{NA2>8LzQ%&j-#vwgS!QHiP)#a0;Oz*w<Nny0
zb`gbn+T+7$l|p*E+$z4o4_!`!y;!<_eL5v>>>97#L<?0Jq{OdU#?yX7$iL=Ps$n@0
z^&RE-tia<lR_fj?B*+b>aN1klH1WZ-7IC1pZ{l>E{a#(3bl6~%I4Ij1QHO-hm^}S3
zTSpgWDZG7JMqV=x$C}w}@K?Rz1p*0x1!OaHU$eO<T*=&nbefU;-Ud!}#lr2O&&7u?
zwQ~%uyvzlh`=6(<=6)D(@}O~CsQi#9LlUU0z;7)W)qtzQB@Q7k2yXG{XOAVu0D@A@
z;qB3Fa=YxyuVf}y<INMu%iG-+>t+rSor-H<d60+bw-T_Kcjg`XwGWNn?$(%Qp73tY
z7!FbVHa^4=>qZ`x<tzRI_+ITiU7CxzMMMt9hFyiuY>XHB*UVk*v&7r%kqxELyJgv=
zpV^IpWBiYVNuHu0f9vEwr%)S`=$3%Z@%^lB%_k9t@80dGa>H5-1<x72--9H0fk+Gv
zlAeHU4!~|!B1_A+10rm_u16buVG7%j3V!OVlT6@$_z4Jh?107%)K7>S@t=_8#~enQ
zGAChD;t5y>e=#AzSBFWILVD7~*o*3Jf2f~DL9fS7s9A*t>pW*U$b6YI-%&5v>?L-H
z1n<0{)@9yZx*134I|3{$<F%Gbvwq8fwov8|&Wn`DALSP*P0>rUIfxCEQ_M6mV#Z@5
z8HsP!-X0g!%^zW=#)tebDuJ;Mf#~^&j{0={7T`#B$6y^`$mYd8=mqR->-_kDB)W})
z`)(sYRn@H=;26F#;n!usW;u~9g2*R8!pmH|4FLGdoY~&`ecIulLs)JlGosQMM^ArF
zr<wf1Jr$;gyTOhE3cNf2OC{k;wr*s$Ilu+bcREG9e6+A^@!G+)YymK!*$zqeZ*HAm
zJVg>&2V`X$y5shH4!0%iU9)IDWu#B!dlmMG+Mm3UQ2Y~TDTGFi$XIw19-((GBhJ(J
z2<Zy#mN9%a_`!3}v4<YITx*$`XD8};?NMFTh?6c>=@=d3wz*>qbV0sK%(*rKX`>C}
z#gS|B&3GSd>1CcN&F9}+vSV?+OnJLTp4woI0Kww|9MqMuFmi4MrpNx^EcH){au{gf
z<--)KTkEzvE8=eOl6VmY)AfjGXrzrARt@7WAQ&2%$IiekjpFF27-<8_L-nsu%0vBs
ziu&?+sQT#tE1^`Rrl>GVPpE9!X$;A}kEleH!Vt1=G1inYgk-`9Lu8b!l^PL~WE)#q
zvW5xSW-w!B{60P3=llKrgBRDmpSkyZ&N=V*d7pE$VD<TOYA_erQd5-lo(B-2MNUSi
zXonYJ*fa^%Eqw6M*WW+k=^dd&7WGG`QKLG`-_c3Dx}N?rF2}JO_jVRV9kfbn_c|?z
zy8n_9^2&lmy)WH$=|V)GT^T9B@c7Kf>z>z4y3IG`Wge?<DOj`3gJM*~gbcg+gQPQq
z2BMyi%nml$xg5#!XYhNG@0WWmo_ebMt*7<d+Z##Ic$2wGTe={FcN8-y%rSVAY5e1N
z;0@F|jkaYC6gX+^n~^_{J=?fCc@<`Gh4;vjFBR_;(hN{1FPW{voP8Z4ll|@tt<UX!
zN_1@8*y%#Acw{IyBwMKQPqx~D@ac5i*qM}uQLO)?%PeWNU+}i9XQ>`Z4F_8f8F&mI
zC@h(>Et<vJZ(O2`Fg%5ZM(#ZO8p)j_=wf3o?jPM%LbS>|<&qQUFsn_El+pjp70noY
zMIudEP_ZrtFHwkrBDEt;q5^Su+i5Cf4{@<RuRgM;)R5P^a_`97OgHAu4~MNpX`jka
zI+stT)pg^kHYd_%f3Gn@@?<2l)$jhkgb&L*Ry$mYJv7|5H@KGQ?sCj2X4(&-NWG3Z
zZ|UiOQodR85Hk9eX8GC9ul_*@y=KD6p0vJGEAeuLJ5w5&19|&=IlBkFyiBh;8SOj>
zZ_q5ISNy(qTj(Ki#`KYYPj6jc&?{A653{mmzm1-6n~81LJ1U~MdbvJ)Qs>k}m!LN;
z4<7fjDz*7mw0MbyX~xZ;*C}$(0(qj_Hgo~ju^WhDz1uhLgB%Qd^itu`%#2gC%0J_k
zeoeRKYSc@lP(wwuUnErTreMoCH1<UWaqrxt`m&L)PTb#Lc{Fpr3}EJt1I8K8Iz#F?
zfBEqJ-5;`US1$IYFKFRP$|4f4;cL)y?ViYQW#5k-d$$#F=8lCxE;61Yg<R(-mdF`l
z#z8if*0K%Vnn%+110dGLIfN$+T^|i4-Kt-$oeJ)}jPUJUsThO?H72a_z!fwi*F7dO
zm5b7-6G}PyW&$T(Ci#e83s0+kRW|=C;(@nvhrJg1y6I?7*@5<%$}eO@8BiTv|2b53
zt+RB$AAgo)j=)_$$~)pk%;(y9PCbyRKbwe`GEe8chfhlf3~K5cs(&^;`_i#h7?rpw
z>18u|XDmDCp2j^3v+mi^XDpeq<&@v>wV&{<MN-qd9Gl0gK|>F|jm9M5dVf?bExDz0
zUf(1=hHuItr~R5Q^bHayf<?(3*h=9%zdDJV3j9xe;Ng|wt~0GenQ><ePa3OQPQTrT
zG-w6mS9LV{%frSG%f+tU=B^_;_2XTU^jKlmYw)2ZIfnA=^m35xf-<uKX1Q!|fJulu
zan&cM)QfbMm@|&2I);Bb>Hb)OS8eO@8D0aX!by1N>jo3WVlt>pCakJtnwTj1*pSY4
zo|*K`nCpwvxV6Ck!eAU#r6>=){@UmrU?&mKc1UW%`U)327eE4sbOwJ8(c?)%$ITOk
z_P!}?q>iL1N_t}CPj@nG;iiFm3sSUalmG0_0E&lAj*ZcJL8DUa&u1th^Tv%5=VLkC
zE&phyCORvx8QJCdX&%)}A9w^&WEAHu8oN&6bfhTJ`%a_=cFKG&%I6j8f)kI67vPO4
zPldQlc(h>krTa7TZDLZYC&*!t_mGtg=CspiD~PW~8YW0oM?rsm9ied}A41I}g`+q?
zf4_j5RR(>vi9raeq5U~&XeSpXXBQ~r7Q!HBtcgRo^(Lhof|d+qLJAWh-9IuIe(OZd
zpdzj=f(X4sx<xvlcWNk$1ZxPhZh`)hCWwHe)C>72DO%l%R2|;awIGy8w3C>VAe`)e
z=@fst&zGGY3~mybw4<v`s8R!bsQCZX%p_BKPbjs#_^kOMBPt@K;@!^u*Ypq2k`Eb<
zD>_u%H~Fw8-FDbdh-t^pc%)L+bu1S4^aiIL-hf2TI)>gN@`mSJ!z>C@0S{K>r>Qzr
zZeckF2&TOK8smpcc0K!nv8OSz*NpeWEC_skYbcfS?g!Hcct)G|y`8EHdm{40^g(xw
zuJdlBJxSrfnJC5d78g`iTtSwdwhgQ(iR?Q&{NeIDcUj?x&(Z!pVd=C(m#qc`bGom%
z>}j?z2&Gw%&0l$3>E;&X=HLHx8sAz|dR6vtc|FR<&dCckGiiMY_dr*DXH+L9pY6e^
zSti?>AEDoGxaxCkHr%h_HKGoK8GcUhaQ-o(@_}}4hxTQ7e7LFOG3?&l&j(!iV)Moa
zc|0f<3ybk8Uxw!HO&^-_J7V%CXMZ!FO8=(Q=U%ksYp=(W18Ru(<Ftxr4$J)&Z}t}M
zcMQtddw&}W31-q?A^WNw^^DTh_NTs`wEZPG#ZftJ{i6J~1u?gWs@Xg2wCVk4weyx@
zMMqE7y*blYA}lnH*W#fMcAZ3%ba;}E_%_5d|LysWp|T|2J-JhB+<BgT)i%ZQO)sy`
zCYygd-tVXYKjqrEF;96r%3C)4bsKuP&}aR`(j7=R$QhIObc;J?*tM!?m|(U)=M{C)
z8a=ew5_jr|%bKL*u|0v@CBGbv^|M9x8pSzhFzU5yK94ls%$$fc^8$(%J%(+m23oS?
zdAPc%fAJ$HO3|e??cO_|Cmvd#5WJUk^y@NSHXuKl?8<9k`I9<-^~our(&12sXJDpT
z)HW1+ejp*fAbI+2+LS@SkIJXFUR}>jzCutR{zjj(i97errfxOT^-sa>Ub%<JRp3q2
z%E$HsDWD~D0Yf`PL|jZSb_D4YNR_+IT|y$Mvek7W@{(p33>u!O+b6K}=lmy*%S_V?
z4+VD#wml0L-s4jfw3OX>K;iB+7T*VBv4*v450=E$Q2xIRq9oYo#!wipt3x{<Uw0TA
z8Ie9fR;+8(*i%=ri}*HLQE}&VN==E9y=41K1LdKZ;0UKzKJs0EBz7Oj*`dVwLe<qF
zyUTaIn8fdOhaK>XtX_RtFEl)q_ADvr+|XB3jq4E_@zvjgHC;B8wv2KGBEnKys{&^|
zb9wiqb6<$;`PC-m_48VkpVEkUeqJHfQK|Ts1eWp^fAGvd1<t9jBt{TMe0+4}S#1w5
zBicFrJ72MzBm?vn%ooi;EaN8ri!DCCj2Vfk^16%sm54cRr;0bT7E7C*Bw=k>D3FLe
zbq+3S(c&JVz;7hFa=Vj@r8NpqJ>6H-8fHV{GFOJ;{Ed@(OZPzviI%N7zQ)n>I{U~U
zPAUD&(GjEW0#gFSX$fhKtunrOXvGc%5p)R^<?Nao{abnmvMF`4Xb6E$fN||76eorh
z85Kmy=>Hug@Z6m@2O>D{MMcvJ#rvuu(gOmF|1NB=E}cLif6#(m*$KC?XrwDUGp)Ny
zs5*N#DbXzwUkEc%!eXW9a}lJ|*6H1m8BtF8gqf7^Xnjb|9B$8znu*$Lo#IZ&-$|~K
z)2_CujHUqR=pe|ck&BSgP==gQlhHVvR1A{_#bl)!cQKS*#_0Jx(|N@&J%Dl~3quD+
zL_28rCc>k(z`xjW71KxJ#Gnz`AB8t*+fWOfEhFlh<8x!}c1=xaW>3LQNmoexgI3?J
zx_AB)<}L5jzDQJH_^GTjz-_i^0Fny?Azk~?Httf^nbX#f6MfSS`&}6yM5a7!O7AC)
zI97~QP*|Ed@f9^??ZSVE@vT2APTa&;oc38nxF=!U;9JW;<>6={Xwmmng@o{#b<1KC
z<ABOlJpyo3O4*O>1|~C4Ix7FUu<M)P@Jb2hj%VELmKwM?a&yqsAhPDbdirK@^VYRJ
z+mOy(&@<=87kFsC>Kik>i4<D<`IF%+rRsZ(g~gd(F}sVmd^4SO`RWm#-{_t}ZyKJA
zzLoy$))$1#MDOafyjpd|_50<L{^J|%EOIlq8OHyh37GV9koB^pc91^(T0y@!Ax@6+
z^I`(M<7S8^TbAxCO7d;hcx8~i)0*qR#6L$Q!Y|pz^;V^84&08vH1hG~-%db3<nYm1
zHaBvW8%)bmsqsShq*cHEI&w%A^rU;1NQxzke}A#=$8ik>CBpAcOzs5G(y0FE$I{Oq
zakad=>f40QMR$o=!usCzZ9`Qz64j@-M9<!uJZJlkJvDdEvv>spo=l9);`^KNg@h*`
z_&$q&27Xr7xa41zKD@jT)~?uG{%ZKFvh1R4d#F9@{ZcqhgNStMBUZ#c8RXUBV?I`s
zaWV^RaUc!&)JR9wQ-}>s@dE+z{p(zh{RDP#zvV3)xTg{pc<LQxDp7qsG;X(Qrf(nD
z(m*A#25}*@FvPUOA65`@Kt;ArQlS3Nu9=#3+217$(dxCNvsqi!+MqM=Id9s_(Q6Nn
zQ(m2l|8$z_{mZgm;<D$s&o0LfuTT|Q@{KIpD>L!2IXnDgQ#8oqp1iGFmny6J?v&qr
z7rnoscICB}A$New;NwPrs{KG-K#%O_K|v|pgHIwt)l>M$2tkv2N{ok1dZdBK$f%{D
zCalze-|oHnyjNJtiqQQ5b3KdNw<(unR1Y-7$!IuC2x}mnC~xk`B`Py+4RL3i{2`xo
z_QiGg5%V<zd#m?95z^RebxTy-Ke1zZ<eYJdQIF$@&d=Zzs$XlNt*;@-&dSVO+qKh4
z6K8Q{rkVatY@b4!toxC=e)>z*@kh~jv;R~mvF7v7V;q1KukE9&xsHb)`jhLW?{2-H
zhHOeNM@=(XUqWcaW7i(|cs-dMxP;>!*YxB1BPa@Y=|H1Y`^C>wd~Gb4o{asU2@l)3
zVkrcvB=5Ej<sY)ms=C&Bcg-bgwJbZ4kh!s3SE+;)G>e$eTXE&dFwHY7#OAAXx0a_x
zEx?!n1H5urdRScd%+&6|`??{RPwW00*Ca3@v!&El-q6yVh&H9}Hq7*GLzYva{P%<G
zXP#}_5bCcZ#pU;Y$G8rS!|5`t_$!|fAukhq#9hy@CrdU7KsuyYB1t@cp4t0_yRX**
zA@g)!8zNb*XY53bB~dw6hf91WUDVph*X*WijCxVml;MQYh2VTP=D6YYzBn7MYJ8^p
zR5vFeF69SE%z$ep$bNTyh7W^w72|A-+@YqtnWs7%e`~H>V4lj3C>}ZK*YTpLq@W_W
zeGp=fOSgoZmJb=;Vao(zt@ZDpvIOc8xiNy=kR2DVOyia{mZ&Zm2ATC%E$N6-7V~$q
zsEus2DkTaisOW@Idf}emGR-MDY<DgtQ=Eb&;n+Ut$U32`hHIY|npU(!03A+Q<a>e4
zmFuCg&{3nKrRbpWM8}LMi^3O$(H6VHWu@(C`Mexuw?15CvvJ+fBWSEM1^t~sezlg*
z8zbW<^S#T%v9On3(5R+z%^JCK_Wdm93h=KU->q$H{;?*E0v8LS5py>|7C7Ah6=O-t
zH<4ENW&xbS=JE6o9$YI&@<+3?<k>+;CjY=XAJJSsT8y#8N_vTTMAHjC_%q2h3vTZa
zfzhmLJIX~X$gTU04uYFN-uk|gYPG}Xg-z7cg5aP%myV@6er!(ZZx8vEfgb32a_k!+
zN<2E0bF%{f`B6zxiok%~{Z)<K5|VBy#UGrE2Tt?{%rzC3r`;WXpE5F-I{xP*N4%e^
zEfAl6@v5M<%K=_ZflT7<0=H|KrEOwDaO{*h!~YB0y*W%2HTkylpq8Va+#|or3L~r0
z@EyC^JkDs2<Y85z;fVBaKk(1KwfY%Tv0?LHAEQ-W1`4da9ooLHD}-hqy>dDJv-K=i
zP3`kQZJl1W_F2>}%@B!Y-K=e>vbcFN#H&E)6M=kyE2{LA0&)pQT@IJ0>Y}tFFJztF
zhMp~}FgaI&^T#vJr`+8nadI%LAUxQoa}F#q0ju?e#7+i{jn4_+eJLr%+mPBg+^HQo
zcW@h$N^5u6EE=)z;QPMLcwnRLW|kCY>nQK2dHbb~#G0DMy(O|U0`an}?=lFRZvQ4<
zKu(``*n#?3bQ@}S0YQqWm&BmF{oT_te*LPQr=WXj>&k4{do3+m-OW1IKP8-dq<_~^
zrrHoQH#$6wsOj0F=u0<)k^{k{Zc=H<F>)--JhLEKzna9|p5pSPW%LGeHB@vm<SRAj
z*(1}ScDIcqSZuKoD`CRIM>yy9ozM}%E>#1q6sY3S?PKoKxl0|J!t7F@+3N<>TjLl*
zv~A^DCBXZ3x>VRNr3H&~hP56~N{IUm`i~!OlC3zVwe~$JV56elonACdE3hj4$4$Pm
z>`1NCg>sMHrXlP8@FiB_K;p`k;nr>FOp!-9V%H(L2<o(;wQ=;K{XZ_Vzs)<neS(I!
zhA}Ui@%9!GXqLcc(HDBL4rQMjZ{CG1$_@9|`(}sIwP}C&e2p{d`MhbZrv6m(#*$y|
zUzw9pk`jXU0*Xr(T9^iz-PXN0$aTzq$C`;{&8ej@=t5p1nxLRDnd77!zoN76bm3c8
zslvx3^e~6?HI>BZfRtZ=F;)~rW65DA>M(q?c&E#}36BI}_Gi@L90WC+a76hEUkn2M
zL%h0&LFsW8OPjY4-4YCQhOyi}9Oh~iL}5(S#rt@elUeRGWbR=|WTQkNmkNpB&kzW+
zc7{1Kdq0|C4xK{CgwmCvTVe3dEbTr&EqrGdn6}i-wQ2K~Rz$AAXgUaNL!FsX4orUQ
z95>3dRmuug|GxY$QzO2C3_*YWI62Fbc^#>NsKSA)=rLMBqZs<GzSLJOoC8)*KZ-Yk
zq-g~{ui=Ly;-p-#Dz=P&N^s<*Z?RMpBtPUUHE;ZWC|x25R==?$O872~RaQ}enY<c2
z$Tr=E=1!GI0ihk=y5kQZ_m&41h(rB&Hi5xfvi>F8vL~DvJneuLz;a@0-X}IKS0LZ~
zn&n*k>sNy7>=T`KXswMgj@WHO!gI0sRq5Fk0^~vzq3vS~BxFcajmOgQO<DLqoPZlI
zKw=Sv?`)WA1)B*ykf(zDr(Q@PobEG-ZTfK7V6`xGw}}2({{^M%D*lq@N?MOiD_!s?
zKTNKtv8TU@>9u6;8ScKpl1;}X-0A(QWwn4$SW|DkMSo@2y_j5q^$`-e`5uetQn3wE
ze!*_q_@+f!OFF2_?1>3a3Y&bb5jP&pcK<@pkN%zxO0fj!_>GaC!RLi&P~}fL3L$r3
zj_wwVX3e#tcTGl}@c>oX1MR@<&HiF`m8=Pu<Z~9J`M>v|<V7?<^Po)kuhT6b+q0MY
zI1qq)fSt<mE0+rqBS_Z6m!SU~p02uTSs|meWSXdeIAhav5NEbUp;2*yQ#$)*$5sim
zgS5isBB}4)Tv!Yx9ZUS@$T|#)i;$iwSuI4G2qNMY_AhP}DWf7`z;JAg#WSX8%Dq_7
zN+wAGa<3{Tij$F@zq4x#Ne~U=-wlhyK@Lnf<>?3|ebE^20ipR2=v_#AUr?M|WLj|x
zM97fhky4bTbuv|F?`$nn^MWNq8JrTIFs~!TyFu8=#JQtn#$7&G3D}TDf&PfyW5zPq
z(k8DcL-pEEOS~qZ?vt}5LRvmk7DvPk=gs&=daI~Le7;K-aO|vE7o$TiG6Q05MOGI)
z!>^Y%4@C)U_q{FRRtB7%Mm`VATpEP&H9`jjAVxhHwWE~R*AQmKozQ1Jmd>O^?nvj2
z5>>S5P!q2v%@QTVa9m6ZS9&*UN-SYkXU||Ip+!Pu3FxgGxdyNq6xR%)6n8FPvWNP5
zezY;5we7hIccSRDeC`~2*-#4R%^wE95&v5A8^w9snzNYOqbr~@>puoYQ-MVOPk7m}
zoDcF{hlZ(*e^JK>D8EK#CQRrh%UwtQ0tV1+S7$NRW3CUv<&Xcu<psAf8i9o|F%zGn
z6UdAX_s|Q!z+<CCAzDPkt)QNVLWMV0U!RUlNL+cDAvDt9JnOO_!=1RUacgxV*y>!F
z5p(^O*xQ)!(5DWcSKcB-T#zP1NsiCv(B@BQ*Yv{6hMRbUjg)9&o&!1%B*;zhz0T9y
z7FxsSP==K|&&(s0tX1@uEGObpR{3A$Hv}ms{7hmdMMy6-mVb-odtE4A)WeKa&avY9
zv}8DCgyGj@TJ4xd-3mv!D7y>+)4mAq#WJ#w<l_P&&o-2LtHEu9T(@<`J}d=OCASIx
z{jbSqPdH<$PEP&LMwepQ&UFO~g012KpjE<j$4BIs!V^j1{CaqgzU8=+s9SCXt-Oo`
z?LO#PLVlrkQwGIbyBf;wkOGJ}V<zfY>^zeKB3!yfkp^(!sSz>?5i)nf#oZ<H82P-p
z&6P!3ANI+g02PotcdlK^BXEzvBa+YadB>SHCcRinx-vvbpx`biMn-f<iM8ePx+uqq
z+H@kQig>4{s3Tp(Q$`{sLQCfA8^zql+O%wCA$k#bj$GhU&SAaxhul%&Hnrmc@LUD4
zR827LXalw!iO+<jTV)K!^cX!Y(lDq{EMA(43INF{!ksIWGgq>g>9SnB#xa-XLy$C~
zNCeeD5s)IqfwU3`!6P9?zUkiBbsc?SW7u>opJfh7w4x0V?tm7Qtaaxy6r=htF)3~-
z)o;62taf0Da==5I@C`%>OAfCRI=IDw7(%{1C5Jf^{V;tF9gKN{_Fpo%+>P4_h4WkI
zH9bqqi^)0_C59)60<Cq_R_`0Fz#>Jz<`=MA8xl5QfduZ4>HvU`&NXHEa=`GDd^kb0
z`*<TBC=TTrJpY#vzC6-m*JsHx`2$L5v41^%My;E{|3UI2KM#S2@9+URC_c78^mb!S
zzHT@T!oq(k;*vNW_Hk#h8)@|%kt5&3Bg;KZCwxN8>wOil^iWDENaxMsgK=G_SKb+M
zzGY`$?jMyb;vAN;i<Nx64}a%$gP$K7_s=`iAlEb{(K9pZbh0jE^20r;w|d!(?3y4i
zhZY_pF*CRPrpkTMcTvz!vhR50q04xufz0fuk4Gvsq<%gdXee#RSr{abIDNJz6Hi&1
zS*I_dB=Rez7!<d86U_}|GWS8?VU}VIB1ySo;IS!<@x~a)2zK{u9)*Z6)`}h1{@d{?
z*%Nm#uEu!Ae*_oi^P45W@cd1h%g6Zfo0%p8p-({?9q1{8ITWTe0<x(uIMU_>L&+n?
zkfD5V(a4fgb6#O4eQ$*ONha!6ULWMV(Q%5el<+q%>4SWKqZOpjf;o=oekLsE^L&zu
z*4?WaBU3L`$<h~*tYpnWEPX!lf)qoa0i5xAK9t5@qNK9T`9f($pZ=huo~|nk&B+M%
z*@TJjgJilqd^$M#d^jJCC7nH8444q!1bqr9(H#V>jdZ%~FSt)ebM3(C(Am<Bh06eS
z?D{f}Lyrft(Jl3@V*}Y~OFBYHlR4a0+QrF03(wh+n@%+b5Jt|OOP6dVGhEqwBHevS
zhkOGEIW=Wq-WyU)v`IvXgL5*zWcwiX-!dUx_K>$=X-NvgeLT33<N;l;p4g1M)RVLt
zQUazI@+w)hBG~~yM=|zJtx>QDv@cbM$*G>G>+6QNmQ26=<*oh+e7NUMFnKw@E#GA|
z0)5xg4A=awuV%+)O*QTxv7xi+SFvSdk8S_VptK@}D9X={e<k0jY@4&3FijA9nOgfE
zUA)J))bs!*r0OX3&VilgJ=cwW^A_cnC>f=ze_9EC-#fJ^Lce8#qrr}ZJx_SfD&tjf
zUN7-oG|l8t3-5yN1&L#L6MHp54=X@|%v<f4;uf2mOdhjWi;(8h<)ggxQ0IJmKfMi=
zMvg7uD$};1-(^FO1Q*&XyO;nWbeqfE=~({12+{d($^%4r|3gc$c$_z{Rub9=*}nfq
z2em%brqEa(=F8W*P0~dz?fMouLtW#Bspo~Eh-CNB6W|7j#aW+EGO|UHwPTWh<_`!z
z@;N%Dn=Ki1wfMLVU!JxLQ*fXnCv>`S#Qev8PiD21p6pkpbNV7)#7`#$43Iy+l%C&H
zZutT&T)5!(0Dd!@eEQ_R`cm2#q+a4$uKpiyxoMIU?ZK~Obzjc$PJG2**v+Zw3qggm
zWn2PyJ+%mqb5_!JqtP{0pES<r2@ZV7((g=<(>n4lQQoH8mtHi>!ri*8*cm;gA*Nul
zOY((e<vL-F&x@2B;&siyLw;Na$z?nvkYpfuuQym`&Tv+TyhhdOL`ThTL;V)VusUN=
z%2&>cXw!Be%%!h}pT$Qhhn~y3SLzqOfSk~Odc}<G-Wh(-@^Einsu`FN{a49W^9~_f
zf!sCBlKByzt%u^<l3R-A0MGj`6gtv-@_%pjU?0&(*rdU=Hb*+T?UZWcG0Pg)5R-wK
zh|_lWe_j>tlE_GU-Punow~y=(QyTsMo7N#ldiKyQ^t%KW!_kHFRz-gOdO_F~5ZT8H
zulao>$KQOe4Ftt%$XONttuxj@FR%x{MZ+{@ZNY6MlQri#Y@CLZbCt|NT2b3Nt8BBI
zZqsY=762^wqv92oM<dAWHOnWCG2fo2|8}3=P$^K}cMHqlvt(kD;maxWhMXI{ktZ?9
zqHh)syWS9gTl9s-K69BPM0DSVzG*vkU|ORmBJKjP2R!naDM#7Q^!3&3omf4FmC;7W
z9iWMmsem(lksB+R)+~;YtPgzU7hvcwv)re%oU0Tz#ge7OGwXor<O%qq?Zgnmv|=|e
z29g+S+?*j<ZR4OlN-+O!*ZYXX$y(Bc-~f=L_45tfF68WA1Pf?yz--X0+vGmE-Vj<e
zpV}$_Cz;qc@cz@TZK$>&dVJJ9>A`c>ZTN)Y$@e@dm{>}>JEApW{<-!(Fsmn%#Ad@M
z0N&l@TFGt)#_vU9_JwN%?Gc^%8HN&?Pn-iM+dF`eHDuR<U5A46dc>ALiJeyi&>3Hq
z<y_e*DlnP8Qt@C@%^JD-^D|}`!>Iv^jrW`lJ8sS}e7W#P`|B&SkA#eK@?ze>H}xtK
z_Ps#2)1QJW=<(%yqjYmM&NgJdF#CNkPffk|lV1Fm>(~T}7*E}*4VD5EXyO5!+1>Tv
z7{^H{GL@*<;5o^?4JjVnyb#O0Y}pU@3oTGb+drh1jKa5WVVa7Ls=f$A4?1lu^t*sc
z-Trj^q6UV?nk`e(62_P>8;KMFwu-s5CVbv>>O%jO*)7dk<ey%00zGIOdU^|l&hp;L
zhyZ%OH)-h=r$%`bYcH8=x5VMx#P^?X&ONqgg6Is)!VK{cK4s~&XI_fqAQIt-X{|)o
z)%b&DT^rkCdEOL+dn`9reyQzM0O{)E<9j3PB~Z+gr;N!fPP;wvYm<iTYNwSFEZ=+t
z5f*XFe)g40`*3@#0?tS+(8%@fTly!L#B!gCuAlF7wxMC5Av*f1Om1@4F{L_0?b`Wg
zAzAa%C0HJC?W>K*MWS!hjNQDiB6!qE=wf3*RyA114&;<!zvPI<pi7lFfB$>pYi0*%
z)&>9M5yB{{f?p*u`#*boF8~m*L(m>`340fud}LE~Gw^+-Q#pz4;k0>C&d^cM0=_;_
zpIZrr_QIR6ve4E;2ETSGw4P_^1I(oe-%>^eh{#JuFupHh(bF8hKKPePH0bAxciNh^
zxsMQDEumlGt6tS|+B6_;%?|-n1-_k51?BUr(OnM$mbMl!E4H6=q))yeQ(k)qQt!21
zIdKu1^52`jY4tkwW}s|0T`8a1KKL1j=@tSd2U}nY0-Jufc5Jk0hPf1aL)d|a0&|m9
zXT8p8@#m4&Ok1zfFEd2v$@s;4SlxHs4Pv{rogT$uzn^r*e#3p`balgKIR`Fwm607@
z`1tF3)61U2^m02XGRhD5+9tD?wRTD|V(rHJXoV6s6~;1BQC{yjI+yX@gi)Nh__H}V
z*KpZ_5LeUNp1G)i4Em)cJo`zpWYeBOn-54MEO<}I_0w-)ccKI<r)}o~pS{G;eH-~o
zvaYi2coSbDe7-G*t*v)l1W#0H8~WU{j^&NzRN1(2sIO$54p?Z@8%}N=IBh-Y!m^89
zP6wXM^mq4ycz<4w&ppw?2Xs|7k|?!a631sMrVqsM?99-2n>g+b;xG7xeI_4|S0e8H
zViseF$d}Ep!wtfO*GciP6@rt(1~}83GF)xtL|xBPVI4*8@#VTS>mi4DuM)ncutAV3
z?&ig_8JMLXFDlOK-ds6<!|m?ZOXFOF^Cnk9>Z{X;{H)9y8h3CQe@mJslE))V{|Q$4
zGSeZ;|JZ%!`BUA(9xH==@XmM0O{4Rgnjye_90B<keR}`cSNus?&Dk6v4B@tksMy?#
zuNB<-UGHAngiCr+JNTUT<GwIoV#}phqPGsDe;~@s%S#TBCM3II_dbg9&8xtd)92cI
zo9}XzF!Yygls^;Lj=+-`I_1J?CzX}hmj;L=Eay<kdD3h<O8~jhAkXIQ-6C)dOW}vn
z*1#jzxfk{q!CB_MA65Dn>B0A}$jau|rks1M?{y-pO=C1gO(0e*;!LilbIx@8-t0fN
zotTv&y@~I`ONJhim3Yo6+)I_Rg1<u$+nYYDRCB19wAcvE;w-3Er^l#6WN+{;2fs0t
zE-N|kqj0i!Kv$J!>qK{9ZR@P|__6^Pm3j#?A+B$kan;#BC5IznBjkO@Fw@&a-to0b
zJ$qjOifnuTV;iadYq4o8p4IyA!e2H|x>eRD@=_M0GB2I^w&I1}CDJza%EWi2sIWM#
z7`w^!O2M-O^$TW>3An)Se|%Y;8CbOEg_`L@ENkD(BX;S*m+ZG5rP$s#8FNe+m0v|5
zIR_tAZUii<V}_MD%B1C7r>RF;T-(qc%gw>34+51aSAjsin6RpX!VGi)9dwPnCz1gQ
zX@YC;_3Of%*c%(BEUbz4`F_m(V?s1geqOoMJoUlzd_=N8l@I8)8)g@nbu1d5v*qDb
z<uQ#RK3mBEg)8wP%-Sywd<OpGiW76+zbo0lx#YMgThG)%wt|Aw?tzbEt1Ev4)1K>*
zwVIXWrJM-h;pTfuRW;fhc*Y@86V~g$l?bof{kM{o<;^iCsYmr>%AsQTF;6nKPOu5s
zF8>6nihdqQv1d6KeU^(jU45k4H2`PpHf`3ycf9<v;6gdDjpjdD46&fn$qv6+(}tL9
zlpfPPg|D6zN|f#lD97`A4i#q+O!s9@=^eDV^1@a_>yle)1dmIu7V7rtLFdx(SV?D2
z=JOjKcNWjr{(5xyq&QOWYcYS|nbLZ{qP%t0&u3K+o-Iv?59!M&5&koFG5zBlRq#>Z
z42UN=_zfT+h*#b$x!!3}sx~gnIs;pcnB!Yx4hElIxHqVzDwr5J)oWYNMmGJuH`?`K
zzw0!Z(d;wQ;l4(p(`new6x;6_*)Q%H6NCSTq<1$~cwb{?WP*W&e4nc~jXW;HEy`r0
zS1UdRp~b?loEJl_aX2^^Py6TdWj`@v>mYVKf1ei<i*Kf+ToS%LwisavUp>%(ygJN6
z3%o21?gNuOmbNg-DD_1q1=E-?tDoxlWEchNs}-ap;Fw}2Eyc$h^Y;+jlT$J7BRZcx
zF;mb#_@`&qL8jf_{|P)vB1aj22c%N?Y|_#^CRyi7n9c4*EG0AQwf7(7?4-}u=Gc_E
z+Ce-cBx?nb0m(WRT8Vpnd+8;&c76;Xm%qV`x#Di6(w|svEr7pXyy6DJh(Tz+%Of=w
zEtTy+xs6?^n>D6!0<4bjQH@+f!TlLHR7AWK;z{b~rh{Gfo;=hj3_r?vL0@?m5welb
z3)AZ{oUDFkg@IUQ79Wg?jweVmx_9a@_wwJJc2g0zyq9<vVs>+bC7;SUXXx{}S7(+e
zZ8NVUAc}pzIg_I<R$5PJ6KmnR5w;HVqDie1+Qp|<i-yy4Y`DUL30-ybrb)qtXYv#o
z6fPY^6svS3r8>4`^XT=$B|(D1LG3A614<OUvd4$8L-f6&6^7r+kO3S0Szp3?WIf;I
z4PefMOw%~#5=W>>;XNtpjW9^vH7QO)QB=IX!aG8oef24M#{aQ(wA6A%7NWkxC_ieK
zq_Wfv@4GGhigu`?Kt8}c?hC5Yhq280jsLXczSMV%Uwx#razZ@uQj)FA?$4hlldxt3
zIq!H_E@zH@MOata;#wC4jt&6V2&r7YP<7_uqH(LHZ93X_=%)?%o$_?=D>!`a4ai;9
z3np%6vt(?q>(y|%%<-BBhIu9YnEaOXIq$`nLip_b<)#-~Wk`Y3p^3lLYhE{aKdFLm
zauFXnY5#;`4c)vKz80#GqLZ9C$~)KjmI>buEAD>h-}(r=+v0|~YOv)tq>y_)!rUuW
z=_^3AHhKHbq_nE{dglnzWEGL)CwqI%>uojyXv}M1sLw}VP&JIuKl8fMT-FDMA&;0K
zzt`GwY9#GPUe9oRIlqB}HhC?=?0sW?H2Na-TO@sv3XJhPC-azlYCGpmc*!;t<^YcA
zX|+^`-AYL~@#uA>Zh)yeb2n*eP4Ji9@i)~8e}<oHzgmM26p!W=XY8fjz{xe9uh05J
z7GX-jSojIia2pHVPZCFif@1NMD6uE4=iGT7jN?0>HHhz;q8i<eq$)$Kytiz$h7Wk@
z!s8Due+S+Wb@n{w+vGO`ouQ{Duz+WsG}LQRlN5o!D4vD`%3WQRZ?pqC<vVX;4Bpcr
z{WG*xx7fvZBcEqSjxin9nQMMTY|V^EHf1s@WEPeau(PL)#&YNSF0$3v(cDJ~9!E{Q
zn~Z{~iEwVFE_Utg4P!<IBn02%0HW1U6ct?5CMC+bbg#K&H6M(yL?1!8x5G{J-czEu
z+Qz$oh+;I+T@+l2<dWOGjv#~*0$^ZFIU5lI*bJ#Wd?xoto>LM2xC_U8;6!LA)5d=R
z&E>4s?Ld?0=6<8o$|EKDF|wL6g4xfK;hKpO^$SNi_tbY^37j{&IZ@f=fmY`%%7H45
zEZJHI(w@Y9P63v9J)-n2*p$0>i$<aEHEJfhncd8Rx^#6CeDq9qA!H62X!2uE9*hOm
z(TmMTmT*D7eg4*kY7Ra1d9=N$M}walx>yUu^<wRz^t%WMcCSXHM4%d4ZVZrGCVW|t
zi4v%vi3#n{;yU$5vD)g&>p3L<!Qybr0e_u6v4aG8-QE=fxgB?Vhq%)3)A}QPd?yM-
zC-c<vUDjF!XH7%|XiY{X^6DKB54a64yUaKN;?<vY9>zKcD!=uyF%#|WT{?PJSUmf;
zIZuS5Ip;LS<Jn#vuJiQ#s1N&;m))-yN2lBjbB+w?srQfF#03-OUCi`?(+1OxZk=4i
zp--luEyK>n`|fU=Lt9D`L|{uMsv?~^H!!Eq8G85!z%roc^;AaBCvkkd6j6G5yc@cl
z1ZDtmzh?Ffqy`pqx8HKvk(7E`^|>K$>H%-QdE*(y4Ho*tvBV3$l-#Hw``9#=;cCIT
zYZ}Vp#i@K9>;=k>spqf}-0pmy5iUkF{CdXo=vE4k8cb#~3O<(J!Lvq&<t9suT5%0h
zV$`?V%}!2-<082%VxsDM$s|fTGG623^&A^ZJu^swPqa7IzeSmg3@d*gwihLK|H+Xl
zocT=ZMt(s4g;_Dsqps`W;~{7Q1^rgTucC9QyYu;;bQQ}a@hZjAVMX_<n=IUUMW+U`
z(5)Ea9*b12PK{!2lR7}^7M|Qt?lvykks7vQq)UUTT>KoJrqiYvYZM%Wj8`~dG?VdI
zu><0h99%9=*9{_Hr;U=u+_}Pg(o!<wa;qs8e9BQau_H&(Q+a5m^bP?d(Sv4!+duvv
D<jP!j

literal 0
HcmV?d00001

diff --git a/fluid/object_detection/images/COCO_val2014_000000142324.jpg b/fluid/object_detection/images/COCO_val2014_000000142324.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9f9e5b6246eaace56b13012b96f42ec0e06c0882
GIT binary patch
literal 67199
[... base85-encoded JPEG data omitted ...]
z6`<`-C0BIP*zb4zJHxbd$A5sA+0Jg^H>#|ZP}lth3M_UAEOK8oUp3N7^#iA>Mj+x%
zMFn-Vni*XR8IWeWV#Tv#)>FpD#xf0!K5ST9)V7g5`b}!<NKbukZ)RBFocm{&rdX?h
z6>Whjx1{FRN^NPf4++ts6cL=XZ3kpe%aB#5dMjA5Nak9fXjJihi1#_7#PO7c3U8C2
zWWIUl{f6bc4{prUXQh7t!hZltWgpjD5@<UPo$^%<I(R30^+h2OUY=ul0cmZ?bANm2
z0}cA)A#x<q0px!G9?m(JUh*Ku0y1ENYZSDv?2*AQfyQ`UDOXJ6EKO9Jjg^4tsQI>w
zQ@Z`C6f)L>1M!E@$puZv`Lnzg2o~-#)d;Jw57B!^gN#`9%F5D(k!9sFR`2;>7|wgn
zdrN`4X<#=F%3x0oaccQ|leyf5FgyO7Wno*$JP*tbr5OZ_RMp_T&3vqS_lDs?2bFCN
zfC4~A(kuFi2hx{Wb88c+%%+xYi9x>_bP0YxknHIE17Q4(Q3iL!Xh<J`e>o%(*1(#W
zta7iDWV!RfEiE0fB3s1!B+17EX+%%Upt8KV1_DlSD#Xq@p-t*QNqq!_@(*C$>SgO`
zL9i|OK~P*Lga}t4VdPr+H>CPt1-6FcBisIKdqpCaqWeS)6E6i3@D^IlUvTQXT*vLD
z<GCK$WEJkwtv*XX^kpal7D{+<Og}m64Yi2nA*eoTMuIX`WX^jVQ+P8rLR&(T@_;Ut
z>teCrxlt_%#^TbVVShbnQbv1KqHEUFG)HZJPq*B~sNVZaAia5T0k__6*q34_l;c=A
zQ5WdkJP40}O)!{uy(~RjskfzAG@p3uj%wx7mZ92oW&1@l{x4IexkT3II5@kPZV@Eh
zWl3TD_ANZp5ZMxHE7?s(C&mEv-wctlqC-UOY&LM+Ur}(PcF|0M9_6XLfD%18jdD{I
z3&#v+1L@2493SB<XN@2ejij~-7Z~d6QT=~_=6`^KuO?Hrn{A!hz=C8{-ITz&yybOU
zYp1qGA&2vWh3j&LGd9Kg!Jt8=rWM94>E)s-|E5<4on@O6W@e`Ccy(3eWMNvfUzHvv
ziw}^RGi~rp-IAO+JCX{vSM3E&<QxBjiEJEuZ}6IAT3Ru*v3Hb5)GtN^X=d%)Td8!O
zKjJ;RnK=cjz|`0`WB}%g&ezK&NP#Y{g-2qgsnl;v)s6iL&J4OI%&w~u_*#nL?>4<S
z%ShA3eihp-Kv|9zX@IozH@Y*-cmxS|wY5z2BpbO&PKQr&O!X^tG;|NU5?xT?+$F8d
z?u|C~=+x*&GZ`NgpnO{;QGY4Nz5KC#mzzLGRs??)G2slW%1yWj)#>ilD%lX@l!JxZ
z&fm$y^#H}p_7%ee8V-fOo=kGgN&AY}VNnKB*00%N`!a$Q7p{2!^tWmU`e_fYJfiD!
zf1kb67lX5(T&a^FmOO(5+^P>wR5FY?ROnMTrLXnb;q3|fC*6)0m~(#QvcYmp!Q?<9
zs%4G4JuzfipWx8^`e{6v8sTpesFggE`{etI^EES*&gdB1s`OIaxKDz<YdU19?X!)Y
z9e$rbh)u#UU214R!N*B2ajeGK<v$+mR?$Xn4%+P|kt3Q>_11&*V#7Ze;t>(cYH4_v
zT+qw)N7!vUntu1M%AYO>GbV|nx(9``9M(L|m?>EtTy`_B2!HWB<3pODb*!SbUZ;Bp
zp>=Y$K@U^V7m;1%5lV(UUHvPB@rGhXuq;-hB9m|ZN&|Z=1`QA8wV!oE?bv>KwbXu#
zS<l@#Z?VeQ`HEk@O7)I)p`q0S&%4{S&ZDYUO|+K1K!#6QOj6c?<n{J|?s;}m0d0T7
zU;29sDy~25t3a1_$!oo!L-s=ka|Y->5H=00yhBJKVWULte%`Nhuw|+U47*P`-b}cL
zrfz1Y+Jt`;>$cSnA2?k0!0^6WQ&Hx5RL@fGnKj$n8H#l@==!z9N%(aKN){arm7i3I
zi`sZZvzL@_X-VoyMXl96&T7V4J^O6aQA^@M29X7JGH;(Ne&}5^xq;!mPPYJ*2{9w<
zg_u$8Q{&9c6Dm=2BMPV9uzsw%@5GRMm#lGNZ)YXg-hfGBl(MhcnCmqeS_jOfa4Gs3
z6UAtV^nNUIO%44yNt7-?(q#fX%1tzX&WVV4*SXfJ(fe!UrtLk@E;Ow$`lc~JTVd=u
z{S2f^+Bj2@0(%Z{1!9=!8^nOW;^FZC{2PX?A2eJ>Gm?#_E@gjuz`Si-p6OH7$Dn8V
zamc1&ss$=4<Q0LYOBLphIt>T4XngCSR_gyGQv*A*P{lrn7<2!62>IxBcJ#4C|BJm`
zxy_U<qB8PMTz8dlhmxDm-UO=S<HJ*woBj~>1iH+4{~*)qk+D*w2vTg(PYl(S3YB;>
zKcQ*P7#neRYLPBM$c}zK^|Aw^?(#=b(wx)L7vHghcXP(3Tb@eRffZbZ*FVk1&Ch5#
zH%BU2E8p??zOy#W<j3We(QbwO2h4Zeu%G^5KnkvH4Bj;!EspRyy3pV#aW`PMAX9x_
zu-IxB;?`YxiY-r}`V%97qa4`hr+~DjSW<4-N?w?-sY_-|+14CyMc|J0xj_?&$Y)+E
z{H$VY%J=ps<qTU1)d=QKwW_U%)6>e&rw_E0gRTsp(MMvgssyfZHvRCIH+Y@9cZc@$
z)$;s3Re?3)E>e=0F<H=Dv<mguXh6n@z@D?0p+7^^!ZsaWd_M8hi9uAJXO;ZSD~s*4
z6jZTfArYa-OzOCiqv*Z+DqdU^!5lo6oNYHHBtO1nrC8>UeNho^qZ&t8$v@*VAxQ7@
z+o~d;{t)J#w;0<i5<61~HYYIx-oC4aZ0Tg>rp(bV8{0%cO!Dwgc-JeE4)FZ-u+KU}
z(q-Y72ywRwAEa2E0_|@#($VDnK;_Ph_BKhc7AwoxEsilmNcQUp*$8Gt(W==PpA%wk
zYsU#u#VseB!aUoW*sHx*PKwOPs_*{Un5`mbQ#wQ1j)yjdX3x&Ptw%omgTwx#gHVtL
zrrok#9eU}sB<Nc&v^!$OAIB7LqVuozJ?K!E<j?czJ-n9`ihlrNacKLaY4JmYseR1<
zA?hoG+SuQ4v6i-2p+KQ%f#T5OPLSeO+}(;>aS4<H#ogT<iiY6D-Q6L02=0WOn{)nm
z?){$0%<leX_kAB#_~Nt^0=^@U-L=D(Yj417KS9Rys%v}Nhp+PQRCMRjEA!=a_Rf@_
z(lL@y`)H%?kg30{bcNo1kWi@;Ud$`Q$hF<`6Q_heKm>XVvI0mDNs&?BH8DnWpp|df
z=sAewG9P4bIpY`~>cH0Mgmz-aN_(?+;^aRlDZl(iq-X9=F^!<&ufJQR%S%7lzaX6~
zRzlC>z<eS5mjd{vmN}U0T^w(Qf8}Y&?Zu&rPn;l0qpqV7Z)^K+$0WAFad3e;+}_9M
znWb|YE>x^nu)eClY9MBIx#OLMGWrvbHu#2QK*YtMdY~Yc_llEKx~{J*J`cq*>0OkM
zTgJ?JXD!xuk3T0ku2l>7qJz$Zn0vi1sm<LeeKhClr(Y7dI=iLZfsWsA;Lg_6=%2(T
zx_WC6aIuT%xDP^g^}2rD==-AoaOksoiD~v{L#9mi;qgg3f3@yvnlG1?e=QPjP5LQT
zUBTZUBT;&kI(B42B99u$un3I1$v5czVC%GY>%SH_ft^`oTnNvUw|-H1akWWd*Vx|D
zJjXppAV;-Mo4WFTu~cQWNpqo{54%LajOOhSnH$M1Oyuw2`N3?~EzdAV%JN7Kp0gmn
zp~kUqbKSV0XDZM7==q^XR5-KJ9>-Jh-vA-&S)La~cTR#jeW#K`yaUF28(!)$*^hG>
z)!mlA^!bK1f4Hw+BEuF4ypft8wn#nQUXKv;s{X>kjS<MD9;mrz+Rn|P@oU((W>81l
zRr1w7zglGOY1GZ*!Vlt-MdgkV9e(U5{ij~?57nL(a?T&kmJA0S`Oh{tYG@s2YSI$#
z3=Ym~0Ym{(0njVa*f7{lx&8wKl4)w-#Hy`7O%F2rQaG0pt3Gc9{^3p17)cV&>DDCU
zbnvaMFXeY-<M6Q6AWg%aQ@Y%#%XLk|U%qRB4+>x`pj+&i#(RcmLG#1$`z!TB^Jli9
zqrJzch)NHYl~WPEKhH|8ga6{IcN|og^!W$@gshak{VnN{!aj@?)|ER^pf$jC1idbL
zT6J~iTBKkpJ0I@*snkzaJ|F^hT!D<w5=*@9spnfH8m%O8eB~HB*<_<T_O$WBmDo7V
zZ_O$W-<(868PPPeY`|09s4RMyf+Cuwg&oIj-)Dg!D&``Z&1d8z|BPZmDI&u1=5+pF
zY!&?fu~mEV9kOU<p0uvaeF7r!zf^km8|)buuJ-dPPn=o6P5!NCmUQm~+2l7belQ5B
z+rVwwdQhXgJ!?8Wykv^+nP%=LzzOdyf9-u^KXtqRF=2~nD-&E9!~ZkGMex2x1R!p3
z5PU;=0<*~Ui^Q0%&DeU0Y@OLbMd}xyAjL)sWx0LMRo&c1jwh5d$S%fWd`~twV@djD
zl(If{7pK4wkI`(ep!*zh&&VB#RyNn=2f!RBWcKtuNn6b<lG|tR!VhxQL`05@XPaW8
zUHH^aE0LS%BCk1<badXys+|A6HCMTgRKW&T<?B7KDTfpRTv?TGGR5DQP<k{V3?z>W
z_9tIY!&tLldT-5_6<yv@-tT}v@{!Ko#@u#x(H^>1xK^g_l=pfZ6<@_KedoQ<lXk4x
zkxGyM_2vL~bpAIa=oI*PGkPDPF=s7DLn3Ekrf8QHCy3PeGq(#Xlvk-y#UUf%r4`c#
z4J-Wlk2F5BAIzf-O$XA3(2R*eY_}|)vap}+YIKg3ndo4LWBI*aE%UBf`Tm{!2yOU;
z>*w_bS}Obahz4J7GeS)(YNS_Hb_i~}Jd@$cS~t<>ED;517Ss%{DxG`LQM<hFN!1pu
zDrY@t@0NI7wZTr2yEbl&x6_qFbx#f)18-HM0p8jcTZc+FHis9dj=G`)-TmwRS(b@~
z3~83_Pj+HRP)1}vz`ptWBTCvKy`BMd=L5Ms*`dL-Rk0l#c5({m98o{{3%?<2UOfJQ
z|LFVWlwcZoLg;fBnN}8nZnV!v{XlNOIsZQ>>C;9bf5B<2(MvhD1Etk^hw3&N9b;-E
zdM_jRcLv9vrR5GeGOjG=PW$!-S-n{%@V2#h*0sOQam$*`nZ8|-q&dkoRix~7f#e=Z
z4yZRzo|^xIqFrY^jt9pD?91(_e7FzO-^BKmGyT47%)BC*LU9M!0EjpGYGaC~?X7YJ
zdYpdBe4Ova%^6QD!-h~nwTheT8{<+!w<b{N0!MnGGsR}*HL?Hw>xB~x`3ZU{10N%b
z<EB)Q_Uk|Hy-(d}>w;yH&@?NM6z8m)YT6j@30+*7`m?--Ri%eM#kZgcxKA+e6{!|U
zne>I{tU)d-bcQ^DZtu2F)Sr-BbT<THb!_<8B{clD%iR%2KYHjuU{HW4C`GBOZ9vfC
zLj%S9;+witWP7TeR7|OZ`cwMnFc%>5@<^$!1Yu~mB7)lxyj^jx&92K`j&Krm5Wpzs
zp?cMu`ze}PDrE(aX_C1zC#=Tjk*cSC`^@ofN5PVxWMw)lK+|efZC(o%o47wznRlS?
zaZ5{+!^M}uN!oxPNSIO|hQOq{VzU*+0VUX+Ryzead!Cqh1hB1?VU@$Uq5VB9l@H(D
zf{?9cgUUq+jnoe{`8?nTi>0Eyo^tF>qx-57hd#V7f$0z8-K15;;lHYyK!tpv;*rre
zAxPZ`!K+5=IUbUo1ohntiUUWF-HX@&yP``Nx1B}k$Xjd6eeH*$)zb~c&-&^bx+ZLM
zF^xFollb^tM2C)U%?{e7g5ia5U}^$aUF<jTH6-u4JcpUnU+vngw6~>gSsthBZ153|
z1C{&ORR3gEaeqa~`8Dzik~g_HQ*Jo<`hq=|8~?XC9_flRZyfSw&(!hw%iWOG3W&el
zzv(pEmL__CPFn3?^#ZXB3J+x5!geQNmvRXIrTJ-eJ-|x$UFsqOsbjwSKPYn6m_7-{
z%2!Efx>sDNOF6QX8_8gvxj*;><aEbF52P3_f1mBFHD^vvPE0$$3KgW8G3;_6PW^Gr
zCrRvF2k&)NpxUZ>9e~_jY1B=Y{(n$bk+e_1tmc!z*L0cGEK3;{gOKZpm1-z9e3k?K
zqB=bDnVhny67^Bbqq<ZcWWy>4wl>^6|BUEx7m0nd7yUZvxv2TIVN_m?Z;lPm@Qh)_
zWkrkst~s(55Ut|DQ&6d>x`i(B?JxJs?gN1nrEg#O#rCLd5L_FDWy?+5KO(OBh&@OX
zRGKEqF(4>ty`s-tMs;q?N@ZhHfi&2CsI!Si-do6B$kt<nZq~_>jqWF7(G}1;#Quda
zq(;!A20??JP~N@bq7<th-}X+=K%~m?+kxNbXO_qXs_|rmVV4j=Vcj6l=6%V0kCZpw
z;}C8@VTbdg@x_-p$b-bggsA2T7OI*CpCH?tY4%2jP_>9$PQe_AquW{V3^<dlu_@Yz
z3%mwc9{0gO+mw&q)?iFuBD}G(KVWD<FNe&uL{`3{VQILt5A+L7^hq9pxfVaCrDCT&
z)D3u7+1SJLpC&{)><lKqSb0pG?djfS?gJS438%4RW>#TumUqkHHkUf>iWCB5T_184
z>N6zir!SESu<arxoeeu!LLBX^iP6uZhr+kF6%lQJV*V;GEzQaH8&h90%kW2OC@k`*
zI3FsXeo&K<)B`}T`B(4rUAw~cMk!bnWN5y&HUkFQ!sayUqCw!xsxz8@P;!1_LN)A_
zp(0YiHddv_7DVF%(X2CUqi|+of8P8i$O0)gFa!>Sa(^2A4I`O*n^2b^15vlNYhGCv
zY^=@lMIXE@izG%BAE|{Z$|i7h*I4&iJC?rZUw?nYGy8eO<&Y)*LR+xWsNf@foZf&U
zJ=1<?!}`iJK1Xbn%;X%<F<5L{JjS6f$r_F{JXEO*+zMZcbm^5l;N?7}?8p|ON4@(A
z;24+rLHpG!M_1q6fsrZ~x=Q%toibbehQf<6H=eRM0NsGH@}}w;a^WvKxsQ$UXCChG
z?i|6YVH*&ioLCRE#{~$6C&@6N*!$%OT6fTR&P_{9#EAmu%W<w(|30%+pMNSCFH&-r
z$WC?mDK>(Dc~hw$%~CWN{8Gv^PI5Ijy<ha)W>BEQ(v`Skv>OdeCrY~+n96fFeLwfp
zWuwGteQt(#w+`e$f7|8<6Bi0VL1Av39f;m%7;iW;M>?|wpP7Bd>cw3^zjPz5pVoo$
zli>DL6;1^wTxBw6=xPj0y!hN)F#SWGmsO}(#q`^0^F%7pQy0Xbrtu+!qQ!-sBgW^$
zOoE%h73#;jC`x3Nx||6QV{vrx+Xq<#*$2jIIQ1j?tVm@AzKu@kqPhrsSp0|g2_m(<
z1A*~5gFJfDaecfoM6N!#`}xu7ooX(dl#RnB?$R2r?-OT0DiY*|Co-Q$8B*b>cc`<$
z`YtX-MtEY+UCUL}qO$4X0!}lnBuO}&TkoPmk<D8$1D&oQpW3lUT&jOid{XcKn?#dc
zJ$%YaB>3do65j8Z@~>ovh|d443i^Y7uH;Vnj`!NI;8v{bYO;2Rxp*hH$-v~sgUDK9
z4P+lZCk;#^!s!iicYeiiSNgEX3fQnbjCI3DwsP1~L2`E^z|PD3{nXTNl{t>o8};i|
zgC?CO(szG~S{s<x*p;1+c-s37-FL5EW@IpVOB?LE;>dVXE)`Ofy8=E~I0OwEABa)#
zw0YM{s9f;6Xk%eCiDA;Aluqq5T<?ooc1n96<usK%eTrmja^(b5qW~xDGVw2Fk*KU2
z5|wrTA1Yf&99%a3L9vc~xYL3z2H2DD>)F_rvT*P{MO0TNfU94JONeRR<FLZHe7_lU
z%Brl~+Pm27v@b@E)vo%1l44wr$J2pqXkY+?IoYWFj>l>RDKrIvz#IEtdjwv?CF+*(
zx0d(KZ6;}}e;84oTOtab4h-8Dv~a@krjZ1Sy9&rH=%y&Snap!-ynPYbYZK*hCu-}6
zMS2<PX<l+Rd0DN*Q_IfnO^?J&r3m3=mZR)zFftXS#O=g!Ri$p0cx2<Fr8y~U-5|q+
z${pw#wgJbhmEVOgByWY|o7P3{BQyEJlU$uekV)?Tc4LFu>d$LFj!(e9@ZU}n)z_~U
zrOcvO217WQ4)V6p@49k{UZ1!MLMX-+gd7{&+ubN{-KDg|9#|D9@vd%5?8huGr5Dhl
zadtVU!=m8`nfde0bELd8A{CLM<U&@HO6IJ&gg@xgt|=rib#xamRIn8dK33jxiS21h
zSUVQ<Bfjm=@dA<3Vhv$reK{b3CazvoL8Iw!XI7s3e7EE@jdUjVCZ9IdBW{@(PRtf9
zymS+rMeBi+pU0EnEj6UdQ)KapQRZ;FWl{1&OqD-~Iq)w>cNSZfjirDq^E^y)ZLBQ-
zmgJ6zyzFUQQ`M_l4q<GLZSi9g$mKB1=#*Ax4yPhM4^?c7kpiW133AD_HeOF`+5A!F
z!cmiG--vNe?Q*+NHiAg0N}<w`dKW(eDy&`8?}SD3h3*`s_Dhc0>aJ7!a^4Jw{ZKea
zsjb=8m=vixBusj{8)6rCf8xx6;32Aqcj<-oG2iKNQGfbn-{y~moS}0uivsVQ2vg$h
zRW|;t%{Wdd`<R!K)cow(U~G&I+F*Lsi?7@K%}^@RI=oNK+SKdm>L=c|8rfyMx5H|L
zB0+Nzz(TWSlXV%X7oLX5R&`3Vk`52oSo@{o5{P=bbPvN>N*jN#tQ4_sX9rwRHmQD0
zoOrMx$}g5n|0JRxfVss&*L;Mz(C8=Rl_FTBHu+%~FZ-#_F+@KY0SM9aNi=sciGKg1
zL+GILgG<K2NSdBNV521}IWQ-AY2)5#JaYIohll1<4|p|OaDP3*+cNAf$wISDNrz^Q
zuBeqfw0KwdF9C}@MRN;l*XIuPCo;LB$VQVH*Ea?3q=<k`y~PW*#`u*&MYR4lJD>8C
z`Os7r+j{Z+!n21Rzl5SkZLFv~u+Q3!NJZJaV=;_RB}<R+OXH_KZK1?{GPgGL+JIzp
zo%trF>>s0TpX>Ge{DvS*SGc`wIda3ums2$osS@M9Kgb+T*_#*V5Ae7aa@zz=Em#gl
zIwF^yFLoBqA@68-Tn}i`>7H?{bp!StklzvPAC_wS<BU!4(MwA*(hr6n!WO?;zG0S-
zB|a=_R|)vr&V9ysft7m~!-CfHqH&)1)F}Zwl@(VEx;WF+r2o6L>!+8M+}nce>1LN<
zxcK929}tbQ?in|#7yx75+^&3(t*aqjRB)sD*=c(t0?h&AE}Z_B&vIN}4W7Lix3yd3
z6kSS7VXeDy7(@q5M$|OB8QI)@v}*iG{Y{Q%4obkFCLo;P>_y}bj8!>!Aiu|ZAU694
z1%?z892|g|Zgp;VEkVz?FYNV&BR?3J8hssBf_(G`Wb4hgGYCEKrC#-;{Tb6b;he!O
z6xc$4*eSQw3&a=A#ffgJFA5vu82H<eBg5(}65`(EF!@@vY_cuG<}A_xaTxRUO+~*7
zc!I^QcLA@`c-5f&^{!rvCuZRnQcY4g-b|e$$@9+ehd8m7-mEQc$<obNjSHaWNQbp^
zVYfjfUr4_XVYs)@B<JTpAc17T9HQB4Jv>qnmILM7G5r|;WURM1!`=wez!z(zBxz5>
z8E~w^=ORGcVeSQ(^kssTc*|}l>+*)IjrTm6gF3#e6ipDB%h5M#V!YZ(jP5L*Nv<OB
z^u%!mT<aR`jSNO%pfjAS4)Yvny&A66?@G}vv~<Y6%39QC<-MIbX;B!;ZIItZ8pMOF
z33J}&>{C&UyJ2vS=w^sa4_LCq2YO8&sMGIc%5{AL|43fWcgnXT=oMM2XQ3aw^xI6b
zej4;tj~!`0K2z+Pbt0-6P9R39%Dj7xSy$_JffrR6rU(ww(q-4A{A3rR);2k!Up_=C
zBDm8oeNSK-u{nl?OLkZE=b9?Ks(j%#o7G+6wW>^dC3}t~vQ$qH4do1~L9AF#%|X#p
zIWscGHxwcciPxlWxin?1Or9k7td1Y$o}HVTbwUif-FnInW~6UIbCNgxcxLbHQdi=i
z>IR#*o^Q7n(_TT^Wi1m<v0R0?EbENc&mUKWjPX|O6UX?toIXEqvFQ1(jc36plXTAc
zKQ(OTY?eY*aSKw<c$IA=5)&BTZmTbk-r6k{pOEDii=`dfoEDOkM@X{SUc7Ql!)slO
zC@#oeS(`>S1?bznyGGh27gcVfMOV~EYs`R0x_TC!D!&~J&e2VfxI#QApSR$lEkwV{
zWy@FH45b-UjIj%p1L@<E#8zSXJVNx|g@GITJ754C`*;!a*+eq;^K*ia_2Nv18Pt^U
zjMd*R=7I~@x2^HK$>j2o0fp4?^^M^+j(9z<Ts=(b(KNc&=ihA2gSQg4Qj^T%K}+si
zv2D3Wx+yipQdWnBo_l&8oDya)`GyDR*52HRIIA!)Ae->qY#}q$myDWXBfnk>#bDl?
z4Oc9VMLWG6vc@*1J{LclsU{{9a~}{`&%#Cuxfd$iBb^OBep#(?b_^kah*pb_DGhYy
z?5mox_PMs}b56P^+>C5BR^PYrxn=c3OrEtV1htXq2lsbmBiKO}BDury#rQx{P?4n*
zld?~2nl%YbZ7Q+ey1t|6+w$ZGdQ!tl%AoKnvgYU=Y-FLBxJbX<rV_2;$otdRBu|uf
zk@Ke|4fAx-#`bR8!MN>F{x9vK<v`z0)Ti>jEo}$1q)l1aJ-%#rJw5}OheG>LKj{Qa
z)8UNLRrFB`k$=dJ${eCAzZ2e%8)EY`knLhr)A78kNi)O;{~D2k{_H+AlSFZuT+E0Z
z9msXI^kB8rP}3GRNu1na(N{_l>X29f)PJ==3JzCvx&WRv|7r>IYkGN-A$p2cIqJ=X
zkf6k)g~ns<MzGd*Js}pGUg!QlBFJAswdOM$Gk#Qmgy#4+M~t=R5YG13hgnOnCsVO=
zK()Dtzh-dnsh^&bh#*uAmi`+oZ|{N6$HxDuOFeeu`rX<n`X$==%tT&SHdKCL_Snrq
zAsi*NY_%USM!xbME6dKK+0djBGbx<iWzoMMEi}ni13%Zi-Oc6c%C@sXL-&GxU<OOZ
zy%dTl_p}{$b76J3TRX$Vj9TP=|D@P~e`*HURg#3e!Q-<J2$l|9DDXzrq@2>{J%5kV
zpQ>OkY4!z=TV%-LstY+gYs{AkCu?Z@Ml0Q;@UF?EBu%9a|M)#-W?iPP;&*RMOEsHP
z0^bs`l#QtMiZn7sdZE&0p>AE7K#rH)rF%b+n-!^ywTfO%TD=T#Z)z5(D&VC3vr^qG
zS{jpuypp3AMKs(>xy;y%<2uZ|FGOCrnE}F87OzFdN}m@=5igL;G%c=&bc+kQ5}_FU
z_koLXd2vG<?+o#GP$&trLH_NoEA!3uD^pI!vEa=t&3E=A<urb)twD3zs@oKJp*7VB
zr~5lzrZ(c*5|EE7ez?i42)fK~tzTCE<QTd3pJPH+&-cN0yUhyxS}&vCQ7+?h0w=e1
ze|p2>(oQpY2G4mn0B;7WigvasdTxcV%2<<^n+)Z4lP*mnUGXjeX;sj5`GJ|2`a<H4
z_3V6|x=6KJ^$FmeCS2&<I;mqxOOk$%%4pje1(+eX_o{-=$u;6JLJ`6`!r6u}b_Grn
z?W^@Ns3)j7Zhm_9SxajAWD~h6`97^?Y@D{Re8@b;|Ir-IeAx2U934%uGWjS_oE-7>
z`lgC$?~*n{(|Hn!kHxuNAL;Rt+iDpCV;{6ye!I%)H+IoNz1;3Kf&ECYtpc}Mi@u%E
zuuH2VvozP8u>iKn0N1^Gp-sVz77^|um+6dQFLm8hWpFa2wptKJgxwtE4`y@ynyw};
zmbV3I24)+jD$EW64$bJP`klBKu1IQD06nn<!UmJix1jsthlbhC_cmRY=U(7|PaWB7
zcMs}!wxYQ#0q&JMcF;;+adH}C)|(W8_WG_ssDWW_I1dg1#>~krTT>Q?bTq4JaCT63
z=$b`}72N?TO!qM?>a-N=b_uq^)DmH#RE1Z_uCmXecU5xY@+81`Md~@bKIs<O7EbSe
zJj3f%QLLqF{<}qm=O>eBX|dgMuS3;XN`V}~Vd5N4rWj<c?ntZJk0)SEPQeoM7lRvB
zM|NLqDT>dfP1^@)ITn!#c`TdUS8+r7r&~-=Lkv+;7sqI42<Dkg1MQJvK>T*-FX7oQ
z2?j)K@4suVDPsma%T;<V3c6QhdW4?Zr41OeIT)#v&_?%RV?S!x)v<frE9)UvCmOsg
ze?K_Be1mjy;!5+w3@p!r+0xTf^g~$PVpAhdYlJ0>{YFgZ%qc+2B9`N|hpV!37EzDG
z4m7cQ>Tg^^<F($*A8UDLg8OB*$ufn?N<keM1FL~p!4Jsd6rm)6@0UW!fSFd0`5)kW
z{ZY3Q80cYMSJ8n?Hl{Yll?4A$E7WgAr#D8}@4^02ZS~L7_uuFB?3?+eZ4V$4jJ;NW
z+s25CG0r!w*W+qtA`L1nt*z51^22+L)6%-lFGU2N);vm8DBueX6zour=LK+r>OE|w
z1#XADoWA(ANME5>5(Dq-;=4H@s4^vXQ*0&{GiH3^g!L~ishAna87K2f?QF5Hvb7}=
z9B7Vi*u+1@<uCC$3AMT1)Y9epU1BcfFNQ96J0V(XO)s#eKJVF#m7`l!hOYgB#1?zE
zw<-wNuSv4nCNxmgi=w+d`U3^k7S)dA`qLLOq86N$ZZ*S~7jf$?;x}rg%RkkxRmrAn
zpxdJ62pp7GSSvQCNcq|YI{J8{y0l$Xrlf3rkDJ)hO!baJQ(i;!M!$yRxR}joWQs(k
zUs-ARx&=fPSR@Q=4gG58<XCS(Z+y~%tyVujgVH_fiz0qo3W;kTg;KktFa9Bt_qA+q
zj=|~Xphd^bnz7|oK!zOm$Vo{Yz8)s`yhBPSto5`DsWcxcytUy`cArn@;YIEqITSw~
z6b)<@EgQm#)Cldl2hq>)r~CzY`VAZLYcpvu<B8h;jJ(2#X}>$t6U2+5dyhr5D@!)2
zQ;<BwPb>Uo%jDq9<MBuTf?>emZchEh!&~6vX7mqjU}x!-z!xk8!3FU40Fm@9OmEI~
zmGHcJrQ$@L-V>>yx9l3oa>SS!B~+Ylc)6my=&-cy#ScfuhH$9yUs7<alL3lVa4MDk
z+T6%PXs@y(Dzd@Hg*9T(;hN}4Y|FJF9s!H51YfJPvD>p-PHZoSID35t1WzJu;KwZM
z>|WIYGee{MPU~H{X;+JvrAIdG*=3ReI5j8&y0fmW^>HJ;Q$J-O_tVFiE?WZii{XS(
zdP-%atNgSK584*ohvK8P8P5s=2#J<wN~m%}9b&3}wxBUopeR9SD|ua~%4E-z%i(Eh
zz*a<i08OFn`f!wGystLP)Z^g$BXXWf8Ljt_KCO7v&Zrv)I>vOd4`hoSEM**tQuSK9
zZxM&$vnqLW_jpqq=O_YdOV;ov+UH^RH!KdR@gKi!G9Jow_&6q#^`1YD%*yIawecQ!
zKFEx@DdS#v@~HmUt$HlP>vQq)SVO)oR%W&d_a&Q>_)H@%$CVD6TXiIZL$>ms5cjq6
zZRhj5+Bm-6+#GsBDajj>$+wz+*YCBny@m#f`Wq`r6W$u4w=g9xmQJbNx-?Bhq^%6E
zsCA7En6tKRNR0Woas}zW`*Kwjf)(9;rarsS40AXwZcu$?z_B5d8BsJP&sM_#ZgyAm
z`1z6yu#luWEI3gTO%l9sg6Z=}+f!juKWlbK12QgA;n{Nf{)O`8XHY`RFwyn<5cWmQ
z^V3XsESx%-dexX6Q0UmiAZ^qepV(BwUKM5Kjs0Sjlg~-Y0nR{q&BO2C>qVf_lZx+j
z+c%uo^3E~kQHoK=)VqPdos5x(2&!d{$D$e3#-({aAb=jbEl{yI{rl83;7fh2=sfMA
zP4V7()e3BAEoDa0Bq|6|uMCiS(Qp=|1HG%1(QoCHw0VOqT_ET3yl-Q?|I8vtzoFrb
z*CEf8^47!(T<xUzWK?!(+Ifs*sNgIm<k(vDmCk>ZS938<_$%;(JZ+mj5Q<;l2N-4t
zZh4eSBnU^6T4>p4|IWjYJwsisR$%tN-8T_9?!NRYYFedBcp-f`q~^~H8#3;_%10Gm
z#E+C|?j>F;Wed)-&WiE5(9GE0iom>#4Cnhew4y(IJxG3C{N?wO5d0!X;&QO`-tAgX
zE@0Y;I-Qq9wJ%c~{kmviJtbsauN#ZZ?$`Q(5<!?mOE05Pr0)R{P9#bS7w;Px#`Q^+
z4Rm-pI}dN$%-#@7-7r=b3<DJo6>x2nU}T8T?I~YQ>p9|{beo&kdbG#s;^hQHg6_3h
zcG@I&|GLd4RLRul)g&Bm4Vz*>=`q5|YHP(*0Kcmuk1|!KYG{*%%M0UwG9Etp#!84E
zA=5r;hzN__tSk6o-PCkhiCH;K#@~RPBYr@a7=Ij$cY)rt#iCw=OM6-$<s@GE;JF#f
z{;UnFmK?YX-2a{}fgW8*eJKN4C_l#i%>FhmLW}3hxn<OztnPkp#bIE{iG~Cff-_iw
z<aRV;edOZ6&aWRY#*u+sgdMAdHJtMJ6VeOnFug}<Km)rMijyN?i>8c|Mfxf2#Sc1i
z$A>VLi5eU04{XyU#|MCOe1oOQe__(wd*A2N|EAe7B=)vaBEvvrCItNWiQZ(KU5vk*
z`bSxHQbqdDP*a+%4{g50qXdLegE1;lIJE3M^w1*nZq!j!b;9R{Tti!p+w(G>iPl>P
zNSUSHh`C-S`wHbj<{={Am+8rUXru5hh9l~ZXB6))-G_F)4>*(&c7O0dGlUFwE@B2B
zV8#i~#Z7$Ub}qXTOjy^4?`qi^5IW}F?pOVFYYKYf`2!QM*yK)03dX5=@0R8`{Hm<q
zWf-UN?v5W`NxZ_@X>=E0!QAbPNz-^jx?o9HZsoXsrSX2h%Yw>tLfCgdjtdM}7CTSO
znb)JWXEz>?USanrI~IUkmtg6~K0p|rFKq~38%al-n?dKrcC|%m?~|T^lItVDb(h>N
z<3}}i?-kb=Hu7f`WYSbt!xG+u+aUDWwsr}r+dYV!c@8~8c;)s*91GK=lo75y)CJNA
zGoq7JVU<l6V^T%`5X`PRU-3?$#%jT97hKTnC|n<Are7;k`ncwct;MFN_bQZZ^tW*4
z*PO4Rl2<g&xc>e=Z;o!r*rYeN|3OKck-6n<`&dB22A1YSKf2P!7e1p{V-MtrO+HK1
ze|P-eQ_xkfV=PV6bnjG1V0b{ZBt(hHs5_-t&Yn~lX?Iipu>18b?%Ji&545TAk6?hu
ztI6l<ez9jh?Ju)q_SLEr4)eLA#<%rc@P7BHbkR{~LiRCscw$bw8Q3M4(Erl|m2PkD
z-%13t@uM!N*st}6e<ZQ6zsobTUFA@f>AR5tvE$M#cb;4`_JE;G0?u|Ay#$_du~elT
z!7_<EQPd#E8(O)TI{}C-gyH<=t|o%zPW4(QJiX1_4j5saNpFn5L>win)KHe3P0{Of
z1Bvm$&e-Si4-jC5<wd`evTqEd;#gBK(cC<dV>jUT5v~gn!14Cd;Dck}Y?hU?b1TUe
zA^~B;ic)c#SD43!cO3BJdG729UCl%-`tyNu#`cIKVkvt69%+75?{3PbYx-D<oJy-y
z-ifFvRBCtibtS(#OIk_4#C18Ac6QS2Oi!w9Lx$TOx($bp>F`NuZDjC3q0pw=(Gd52
z{(3j{ZB@2{PQy7oHR8bjDoB_axL0WM5&fu1?v`h#*@)+Y5XJjL$1Lh{?if{#Erogi
zN@vOK<>uI@@8sLfs3F39=;weCmgWy+i&sr^^(u7z>77;{$4cgz7jI6@lW&N0)`X$$
z1zA2o`Ct7`tZU<Y`NBW(s6y=9e}6XESXC}IB4j(((9%DAbnlC%0fXoq&G82*#_c*|
z(Z7CbO1HSxo+Jt>XVxbMCDkEU@c04m$AeOcyjVoa$3Ew_gz@MH+ANP}4n4_H@Y0n<
z{jPZ(<xLhSem8|J1$sFb>HUgA2SFr=y*k7G5j20|ko&tRU7aBdt*6nw+RO>RWBlaQ
z2wylHTV-URqF)RPI-s)9ex*9UvEfx5O9p}$l6Di7{)6(Gt^M)c;CC3t@t6V81SAWo
z#YNk1DEFpwkSbcW@O=O72viK)e5-KTXnxAKJ<BassZ=`3ZDWZYd6&GOh+$nuvODYE
z_Ita_D`Q<HC88ITMBo4rVm=NYZAoO|l{H*uIkD+K#64E-<#Dire9OG6>mhMTlE4+=
zRJpFsq|mM5UUd+Gm`s;uw&#EW&px|FhBe*je~*P07LvA4A*LIvRPiW?rWj_X>G;E4
zHQ<T{u>X-uUvYsg(*V>EZcX65N>X35c6XMgRehvAm(Pxh1ERhjP;$voeGTCc7bYuw
z%z)N9<V<*(ZWzwLBnAI#B+1eu`CTjz_&jwsLUHwBBN@m<B6#zQI92{5zb;|(U${uu
zKzfxwGyqSF^!iBaW^mK}z2dLG4=$QbCUU><fJb^72p%N8?Y0Hn)s6fI1q8S*WG5Um
z{xY=tA>m0rQyJfoZhD+@uP->Ewn^vbs~bA&dD-F+567cP{%SiC%3GmK#&tq@;a;#S
zZ%kJ?(Snz>Bcx2vDSg-BS7Ur^67lfCrENM)BQ@G8i4g@w-(w1|!`@3@UIE6Sd4)><
zML%ZH9O+VkG^MgM6krqJ34g<sDX~H7>cNoa_>smlvbeTztIKnaYodu`1QEEY#YWHC
z8^03*uPXBB_sqd+zWSk1j$OhX@hCuu8`A7qm96D%-P)2Ovj$_|Hx175uQ=+LSK_o&
zNa3*CcTx2UwZI~pGv@X;aJE|%ez%j&yV|2JvVP)m<RB^y^wRk=fwq-+r(%0)jNkL~
zSOD$f4>5Yzm|fw$`8G^QeXa(X=lGl<WR?2r$yE*oC@9bCKm$>2@^HKl?vE@-cYBe&
zHv)Ohw*EKP`364<0tj(_3|V{JieO~5(2TyE2#U2g&$y<-KSWn1I41tXX~QWxRf^C_
zoN^Z+_gQ2pRjxZG(387F3lQtgdNc<;+!XM+?wO}7N{tIChR)0fNVdmj0{JUe)@)ZS
zMk;et12OeZt~)*$$_rQ$$T-=coo4K)hG-%kG}&e<4%mW)vYERf(nMpHTxZQqxy-hF
zbsS2^F~p0GIn%?w*b%Sk9Mw~9(K<XOF4!Xr?j0vs$_d6vu`Azie02M3k6Y4S+ppG`
zk>?*p^YvAgH0r6{7h^f$>7sh!UTQ{<$yqA-iQCrSXa18iCUU!OVg^m52MckO-z6;L
zZ1<`Umh-?yITl`2dJ}jN_Vs=(`6sWq=E&6@C!pWfXVG)b`#H#Xtjx$Hz~a#eGlULc
zq4-3!IPJu|tLZ&wIIZkpQhg1Db4SDtl~03{xdKVfHbtq9d}5O<3NhB?fvOJlGfvW@
ztuXU!=MCq`TO!&#hM2CAN|iEjUCwgw=QI?dwymE6M=Qk~9ZK)08Td(rgAGC}-HFB_
zj?`Pvdk1Juso$PNk->mx$tp?6I!F0ENHE*z2a{2kadNpP!-qo&q(PU!shRb3tL<_q
zxU{7#WAo0^dqBodjY*7)ULsC<Yzu;noJXVm34PCaD!zNlGz{z&Y1LdzX9zMpr(Hkf
zM1Brh$KUnDOA^1iaa=x{^O@Z>J4e}G`eA@8f=LNJTM5)VH3+B8AdVbzPwqzI%vNjp
zZj(eahX~3r<=GpuXI(|+I|)XNK-w4Bo4j~;9YSTI^C9`l%9U1`T!=(cMveroz?Y(3
zX*FkqQJYJj3H;9*CEQk{d%NmNxKnwHBT0AsU#{>MVaH34fFFXHIgAgPV^RTW?DBC#
zR+h^k3T;osh;7)<Gc^UKs$Ks{vW7g&GHOSvv}YUEngpIXU5=-Y21as=Lbt~c2MKFF
zKR9pFh!-Ap%#7pWpfShZBo(R!wWGmT!3jz~C}}}x8yox7#nRu9tp2_-aGfUvU+Cp&
zX&m#;X}Ca7!b%mFQ*?UJ7B7-h_3|iZC71Kdu@RCZrFJ{hb}Cdk@xlfA#PHt=OkP)&
zdBvG*Q5FFOml8|nxkK!j+RN$iNZ3&gc4J(0KX`s1npPhmkA2$Jwy`T`8_#l0+cn$t
z2H}9wG0Tq(qQNRBZU3M!Nes3sRR}T;qP1LT-~Q{&?}*cfs(~&w{xz%SmZFWkWqioI
zuADKA2mKHj*PeYtX2nq9m*;P*I~`H;MNEKpq0Oi?FrxJhGP+FiW#3l|O2LSU&M5!p
zqe(!g$$QX-h@G*F7>@vn$vgZZ3b0;?z36<IA*oKqYcdk_+{}%Z?f+a@E&tzzrAC|r
zE9sayBd7|;muR=BBtWVw88R%}U8Qy7>qR)!H}qdg7QM%w3Xx_6wu@u943b|hW`qh@
zhDbu_wv|sqTVt{3>szQRw~2y=`o#&JbG5^b_eIT`F+bG<vw6wFGZ8vlKE*n`YMaL5
zgkQkk?=f3QMBG*aMQBIRvm8W2zDQXv)wtWmFOs#!-iGMS!ipOHD0sHoWFzMYw|nf{
zie|tiF_Li&0v<|$U8wp>p^uce>If(jnMFh1wD|J9+^<w2Lf@~^e%dy{E_Hi566DCA
zho+Hae%X1{**nfpQDWwzjxP5f{?6#|-KLNL2a~c9$?{K{iE6E!obR~&D&lvyeBR8?
zG>S-(!8~b)!pvEurH?3dOhYl{a||8XOFz3tzZen18SysoEUh|;=>H^wXA3IR8GJEU
zjS)7YriuPj<sc|_FeBzWZHW2hGQAuvPTZTTwE!Y9UO6K}-UX687jiY(6B$~zM)OPK
zg*HYM#AKVm!7jq8@I1RJ-ezZyp+_msbag-mWj_}a?uB$Ubpx`U%|#g>6)r_<!vMkB
zaE-3(27SeUP~!Z6u~$5%h0*I25NWRo)pT)|>+6b@@8iEDq406NNfEn>u=B#hSUfJI
zh_f2O6ks2xX(%C<vTUt0m7Bg6R|th^p_W?HH}S`?jQS!)%zaA<4KI~iu#jGqSC26)
zM4z(rlHuEE{1^pac?Br=aMzVQjid&O+2-Pfd2gE!2n;7L2dPZju~S5n#m$#AJE*qd
z3bi&`1KxKE`-*zUOM~EVFaJT&Y~y!a?Rv{%Tfy{cp~pHy`c9Racrs-@my^dn)~VQ0
zcqO?bi~UZGINtT^bQWG;Z00$<^02l}XK6@{jK$r4&+QdUM50=xLfdS5$S;81H-6cR
zwnHCnp$2k2xpgY8Y^&3kLTa3VB_#LAtiBUDZx)Sv;&W-YaKpw)YT<Wy%bE`H>V?D<
zE}nV93{r}>p|r>|R3Am)vpU)aI3R&bX*2N&eB7ywF80epUwWeeQ`=4vifnYQ$yePB
z)WAmRoX=;EB;aavq9Nj6W!Ca2wR88YvZB!%#0G{<;mIorB~xl;Zxn0HOCDI*Z+T*v
zRrfoe2#6KdPEyxc>9E@`y~c+;UXSZH%zZT}3M4X5%&{qwD=s_o<g>Jhk{+H0dWVu6
zi!e9(_$`#v$dUi1kaUdu0dI{hHVjq`<-XKS=UfGoB6Y)sp$T)+9>Ziz7**mCwRa(W
zmU%2|y*V~kDQXkCJ*YWFk8is2dfDcb4)j@r!k5ziI`WfVZzZ1Kh)I0y*CZO?<D|8C
zasb5o%!IDU!d^AI@RY!Myi-RBer!GBu3zb@IN(OSmiNgzt#nLz{1uMfX<_#d3LDCH
ztRIy;xFCB`HTUsf(&lZ&O3@-;YSEgI@MyPTo2K@(_PvzeRNxhGO^f6)8A&cp;sZ@t
zZiOxtfHu>htAMDIdT%587{x&11A*&@%FYD?xpHY4+V525wft@LB6?QP@bG85_Vzdh
zFOHnZ$E#;Ma+UMK%V%78Awrs`gBhVF0A1Mp0mt^|P??`<Z-!)<wop83Xw%xCL9^zo
zr6lEELufs??ltHBHzk_<zbUaGg1)~S1+Vz~q0{cMwxh?D)08=Fyv9&Bb#71Vw?C!7
zxF7oTt*qMB_FTY{QZfGFP7|#v*mdVCM+*>*U?I&E-obAHk5{=?{cgm>ciOJpE;3Z3
zSq<bUT>8ei=2z});_9PUT7<6E)TC%q<iJf$VIQ9UB-bQgEnVwRa+&C){1gJmTuP@e
z)`^b|nOF2b|5+Z|gAad7*NMLjsuSa<jXHSF@3&Um_o5h~c}W*U^+rJW%}@RG9jz~o
z`j&1(LJ>%vBx3Y&u344umaWrk1nFdHrFv(vWBJrlh^Z^itu2OYsxx^tc|}ycHS2E5
z)|}5t-6-4q>Jt@I<*sfOvsj5egWg7!_A}k+PfTaPvvIWyPUcbb#A6h~eK&0X;m+H9
zR<v=ZF_U7E&fXo_QmC&JJd}>Ma>6_vbk{}MOYJPe5;KPb4t*h4sQs5L7USYB{D!{l
zSTITJWa@xbFjl_*=|`t;vTJEhXo(!hfD%PDs1I%BMM*4^PLGfh3;I~qm-hf=l|{;=
z$$Q!Dq<~Y)Nk_Z*^ZoRf#jZ0Od_!4D6MWiI6*dc0)^Thd?LwkZ)CQ_(CdteVOX<pp
zdM=*wU9ft`WmuEx(b27;xRrSiRZ^}V%ygRS+ib|dQa(t$=^#g}3~#Y(J7Z+-$20x-
z*HN~<_|nbzq`<-FWN570qk2ShKAXEBWG04dAufaSC9X|@&dE+e+?C|;w?uHmN}~DK
zGF!R*Xzqtavf=>DCu+|!qUKy+v6SmSC~Z!PY9=7k?dvC_0=D3xNH-@sMe~6GH8S#8
zr<vl8S;UA{vxhD(ITzCqRHxPdV)pc7+s7<PUin%zQdu4lee2_Tq6~_rX~IX3wcglv
zUk#(Nq8jIVdA+0c3DUI#kWUV0k*gd)ueMBzr!${AURvjLvYLjg3*`l7DUp7HK^Cw;
zfZXHQ-}a>z*z+%KoB2M<qeX!5+`xKKNP)Fe4VNZ$pZcGKZ<_pTLQ{d$pa!v^#Jh;z
z)8s+<Ow^rYJPevza-dPtUveB<TzJ&GzXN~?=Ej7RM5_~yQK$90r0YQ{BmbkIrfp47
zVn6R^?oJJtI%{haSBX0EK#8a=AD^A7`&u3S;ldyB<N2uH-YxFgXL$$!LQK96-vYTm
zr;Q*ztLAxskqs9~%k)pWoGAJ@;D$;1Nc8y9?Y4vZ_K1C+cvTCs7=BBDFEUK7N|rWd
z|5EA=?RSN8)n`b=F03%w9=0dNMiAZh#|2#97hpWn8cp?XW#lAgA`@SxMj2Rasrmgc
z$8_JK5t*#$btIWfj;GA(h5Dnm5SIT*l*2;<wBJ+J<~=guYfV@He~l#>8V|z76=|vA
zQfnk$T>L_NiWOJ%4+=qiG9aFaaHAThkahn+`Jp&HuH-qmL<|x!Wc>Nev0EhCotPKn
z=)A4w{db{yJW5EliEWCpc^lSCBc_*{e|#RbHCxj+^@*$5t$jU1UoV>hpzkPbQmj3q
zJ!A?YhzFw2OO6iQr0kSmVjyiO8t-CcRKG3J)P(&&cf=5wgueuH<f8p+#6Q3L*N6YF
z=-VnFsncQl_n0}(gv4oOCB`)A2NUH}bjp8FLM)eh=@#Q<$g1qe2!$x}tXiQkQ9B*Y
zqGS@{0ddSMUnD1-Cdak1Hpoai%se_v9sKN~r{y_g7%smWN6Syg3oN$UK&d-GK{@t!
z8#kUQ()aa_*GQzjibb}^geZzIC+ug42-CYn|DC<nVEDfA#PCpnUIQ#5>70zsbW}ie
zFzZKatfu`9(an}Lk(;8-t|YjiU@xY2>!OZe@JKnaxYWL^LKI6r0J$okEyj=_-`O_R
zC2#&bd^Y&yY?eP9X=R;?Q92}vvfcng4M&biu=(<9S|=5~S$7i0qdnQ3yDwCZH2|qs
zcsGgMNNq;fFJxX`P7-RyG-AX=AO|os$9vmpxf|CIn<&5!4UXt({t#u2k#c6-a^ng=
zqteF<&pPmYMc=3=YSD)EA}kBc#hj~XRRuJ6$p!qECjecDlK-fb$0pa6b@8yUsh$t-
zO5X!krRTz*;j{>8-RZ6)$p3yg12l&q^pU%L?z7Jv0WfO@lOg8XUped&f%JdqaseKm
z?q%RnlP4}&{)PYdueB<G4M?*%Rr^(PjLK+KfcqTrpz)qTSES0LR+bEqLu_y+wtNa<
zJZ21~w?^){fLF+1%zUSk=Yj>&_VrFP%z-rst!R@Lzvo}pwz@q?l>2bC9ab7rI0cJq
z4b@TAq0EDkYuRI9vE=6{PaYVgWr^_1G1kPM%S>5-o4MX87gbEa<TLcxidjUnRoOZ-
z54!6g+QcWx+%}==I8+SJKm<~|u~<orOFY2Qf<8$VUKzAl2Pk$@crC3lYo|}C1ptiM
zm{q8UD14$%iL`je?m7WJI{oWPGs>m>btNA!!(l`5llj-NTsMWkor(|M4t%KE@a+jW
z<pIUq8cQ*loK*D09PwoOpq2aYt&J1_E+|eS0ucWSe{mSLS8Ws4V__WR-;~d*a-?BT
zau^U#bn3@h$4)9NCjy&CncIBJ_6d64`_*m)#oEnHt=ai)B4v8f>dF>z#Fr@9U62}3
zu=_%!D|M%X{scHmYId}&r=bo%Vx=I1Jw*QR$Q>Y$96u5Z3K<w!E*0!)tsov;K35?f
z+^o^O{!IT2E6vckL=Aywh5NgNLK7oy&)%+bOce#x2DU(i$+c6U4I8TpdvYQjptOK*
zpYR1sJwAmofH3f?KbKD<jQfE9dvw5mA6+zZ8irf2U|si;qYY_`__$3<s!?G#?@P6*
zZ9yC-5gm349;u%RbjF5`(W)-f*0oDs)73e3<CPV)b|wg9(53K7sj&T0%@wo}f+>$4
z1yA_QK4#-?{svZg=fdIto=1gyy`zAWAVe^-=uj6VXunYR5XD95ac1yVWsb_+kZ66(
z$Cr-tb0X#X$f>TDmZPqw2FLo+qeq3d98OafnW>$rvqGk}z*3(@glMVE1CZnY9d#K2
z7H?Y>1NPG<3=pTxhpr?6^vW$X-*+)WZrWO_u-3pvNzfQw0x*O~fddf8^G!A}Y@PJg
z@(^tl<z_PAzf%lYP@MhWv!&&^*^khQQQ)iz!^XM<$fwypyy@9w^rjQ_g0~_x71WyW
zWeCIQ$~ib5T}v+!n&K4Ild5|l#f3=pFuQHIPS=9L|2-wNrE{NQCUeACq&RDMja6g9
z4L>n84=~81tefoFQ;aqgg1%g8yd@7BeRRJ$d(_5FWuEC%-;38F*EUEE;a&m_C{$U%
zCU!^N+HQ~tj~6r3iG#xL1_47~g)S+CwB>0*iv4di`4i4n<B+3G<Quh^w^!}`jNq$g
z2b2R3w?{4Byx{|!1g=m6%RrLJ5+TiKOthm1;lZi9UEpt`Xg@Z_nWTayA<p)zFV!){
z($xz6Z6fb;<IVgX@)I<l*tf}&Cex#?uB%(#DyPI$_m8l|2wFfcYSguU=!ertyk^&p
z?J9OUO11nEDWENs7lklQo73)9k7gWUF$BD;WS?+ps%x^*$lghoZZx*KJ(}q;Fe39_
z`TW^MJ?gaTu9dI;<!XpxblyR(9=O^8F_%>0QD0dj@@m|ivz@D;d5_`XFIA5>RVas{
z#-HoFhk_<i&-dBpbsFm*=0CsRP1*R}zKc3s$*}>rwV!IsuER_6addR~2gPr}S9aA?
z{d}}k_MSi01alE%A0%9u*FyXc%6UlN>tE8Dk_2D)Fh91&xLsx)oL7+%<~S%5H8q8L
zg5q^fo{vSzYD+|X-i)Vwiebv0u`-WkJbgeti{Gf$)jv9-73oXmWPmr|^Gm~q`9-f2
z{;YNmWMPA*hOy)1D+te$y+*B9V|zf{Q-D7wsnaNrR|c^1P@(F$VV5D-vOQgo92rHo
zd$U#%8ZV1!wzsd*i=ruIACzA+<mOkG%Q2Kk>wsm%TSMj+m}kU7<xrfG+-qEYk4`@}
z$A=2vT9Q9%tCvemD<%;CMZGx60rY^`q|YY739{2_EIKi0Q8;5R@QLr4?P*5?bF()b
zHSE}npV@ajIe5^)-^N~Tc^2mPiwng$3(XPReFqiHSXv3=2Wz`mE@m;;tySe$X2ux2
zeO>9mkwN$nWI+OFTKn1zx>}m3h$0nYab+nEAIP`u&U;O>6>Q+`Fq*v<BV~KE(#X4z
zZ|(5_43S}zIx%4LS5n<st{-D9bf4g)qrL9O*?DR})C}Mwo*IDS1}E61XS=?^TAHMx
zoG~J=idZr>&y$a_IUTB<eFN758V4>6x9Bo`T}8xg$G^+%t;<~xFeONg;>VSaW`hw)
zI_!3(k)>+r%Y7R=5=@s~i4(DrO30xHlzU__AQ<58A?!fq6Zv9v+OV-=XbY|1BZT5g
zZ{bn1Kc*HNG+o`!A(#F^%K#rSG&z2+MX37T9SZVNM*!a}fu|Mq=sq+i{GPEf`V@6;
zQ2x}rbah{TLzn?}vBY{H-rvy`btt|BL%&J4&@_=LJsCzQndqsLJi}$qm=NbTL^+kv
zJH42nDOLJ$us0#tuH6((XkxSu#SastG9$9LqD_v{J?y%H0yvF<Di611?H3pcUqa?H
zqhORtbRTs`^#?*U2*BMaEWT0X^Gq?2=!;5|my@J$W=pG#@4dlvQ7$>KZ$kz83tG+<
zRnjU=YHMVa@ejYt$-?4`21nz>4guMoFImX={r=jZzeNjFKQl4WerLyA{MQB_;DMDF
zEyFIL3SA4(thT{ihV|KT&!AO!<Ph@t;jZll5IG!5tj%OxPw-Yb5Lj^b)%!)r<n{m9
zAp6Ko7EVQ@>-be+@XzowP;VDg*)4abZ5&1KueIquX!OHF%&TqP7}z*8h9##s;j`96
zgH8&IlIYH=jS13)Hp_F+`Kd|44r;hWy*w}`;#_3%1R<I_-k`3_IVPCpTLxWjzrH(K
z0N3Ih4VK&Heh&gV)2?tl%h(R2)a;2wvG|Tg`dSBbw1U@G3kOA)j+A3=zbl)s;JQun
zyu-G}6h$;nKyu+8t<#xvAosGh4ZUF}XPN}2mn4LL;e#Ej&5duD3?yotpZ*||Bh4?$
zQ4PVC0<e{;m(tM@GJniVb~dYBcNR|<E8vr!VVh3||D{GHpO9WdGA)10B9S<0(#aiL
z{b#j3n8(DiMoZ5L$^m>o?}rUxMcc<4z2Q09goPb}4C^$I7^H;1rm%-51?EUBHi#32
z;$JDL9QQHK0vR#BZ}-Cd>Fe-^=JmlY6}{<DNjt|FZ@g}P;drEKqo_N=lgbXf^}GWV
z|2BTP#^Sa}SkKvBKSbgd-*_XeaZKRV9DL>DI~9~-E~aUKIw)ah<5>F^LjkP$J_MZv
zeWt$p|8(`%VNrEqxc3l(B7!JL4WS_2-2*D!EhS2bfJk=?h=3w6G)N8IDcwj79YZ%V
zbaxEHyx;cJcdqjv7uVjvTI^Z-iQj!cGFI<~uawHTTZ@7in_H#7tYdnlh6Nt&aB|ig
z5SGc+<vL#2n>N7K{+T$rvP1V9Mvr;#*ok<`o9^;I{|9PSy=-o4w@o@L4E$5Tv()AF
z(l^lA%s?^BS-xikg4*?i&b+dfnuD4AXg{jfZ1UQOWpa43kbQSNWW{}bSvXoN$=Hzl
zY>zo?cQa+|%{pZl*;v(xOnl>k?9Qb22g1ah=wYBHM-vVErLK4l8_9!s8Nws3b9Q+9
z&~0GsBl8m-pq|<LJ1v$jtkQyg+yUQlQL&or&NlK2rXn$8M*3HAoSxnmduM{t!OT_N
zYpp3c{Uwb^hW0kM6Yf;?);Zs^yd0bmYT3Fn*!vIEQw691?FW+zcW!2C{bYW79lr2>
z`s@<zXK<_<-7hFEprjv0xXHz-;FJ;hVCelxTz`)%J#X}URtxOc`}(tZGDN?Sx|zay
z1aY*0^Jtl?49Cv{dMhTls7CX5XzMq}yTnW@bT;kq6PvU9_{xEjr_N|YLTAelg*Bd{
z9-j|h@Jjxm9}M5E@t(ix5=s3v_x*i`+ZI)SX{0oG=Lh|U9JSSrqBtR)y3ROE)y}n^
zh9#OVn?{Wp*U%e=p5gV(noEisAkKYyMIw$SI^y}O&vo-rX5T)s(J^_-?uzBa1l-(J
zIIr47_X_1xzO-@W47C07Ro&g6!w%xSv+&$OWC-JxP~Ms)W~*b3w`h`!u%ZC8f>p;N
z1|$hZ7aPktSFYr>mfa8;E7b8Zk^Z%%eho!sg`tc}&cOKEu|6K73<I@?WfryLH5;?U
zGYYCjq`K9fNmPIU*~+oE2r|%+Njz+!$*bzQkRZ4ZIc6qA8w#-y=h3yj%Za)e-ve*g
z+ryiMxY2y&`szaa_rC1~f0YEgmuJjqMfyn$GLPRBu@1Ia|59Q&aKlJ*#wyOvGQsSY
z#>*JA6IHDeC_1Uhe{VVC6`<b4erj@!r(63tD5=hJwYZcdT#^~Fq_lcc83ZpZJ|b38
zy~j#ukGmi%?5Rk2pxNI63%t+h0^!)~ffBq6D*!%PeFL!Tb_Kn*dgXFv#@=sAs*?1<
zz{T&~eswTrSBEf7fB>tt;Ynw1ltP|U>H4~JS_YJ83i(1VS2XeMO=^^8IsYA}l;Sbd
z9>dt-0Ce5j1S1S(f9T7Wzzuf4l_;^CjWLQHWv;I7bMZ30#R)<arSY?Vz|trk3|+<a
znp!A;qdcN8B>whAOj{2QRuy$O`zXb*Ph4;C!ZYqHo@r&@zf7Vgi-S)$OIl5^0QPLG
zeT0KA8(#xM)VM9Ki|=cH{-#}g7`>hyw4<^iwykNOZ?rV0NL($)!}JK4W5{}9&P(ym
zk$9o_FlrcA0yEx{rvle^kw6C{kaepyky5eI3mjbb%`Sc$mEu&`mZ|B58W<0s|EqJf
zUj1wy{TBOOM6_Rj!B7FMUP~W>zJ+hcDCKkDe2Ws5%I`znYZ$iG2lQ)_G+n&!CP!hh
zeM!o#{1MN4r0^qyhe(6jhWAX=g9MA2{&M;0-BWcuk!BUopH;7`KdmFZYKBI-ATcSb
z?UH#7^ZuQHZ_uz?qL0)t)%4SWTQIDF+%j6cpD{`UWr}UUC!04R{j1=Ct{T>(v7Y>?
zH#K8m0dLnD97<r{h!VV>d;)G4R=`s#=<cDSU+NJFlrbF>u!VTsn66IhfHN*60?=TF
z77pBo&ouaUCU0_|afh8VMd+czUhrua6_54lH(7nGc+Do|z_g=Ah&#~UqR*fbYIV=(
zBrV`$**A^G{=$!@QHtcD)_j&{mXY21vw`!{{`LxM{ZIa6YsRw<Jy_o=GpMRhIsEiK
z=;1IE{u{d4jC&wAddlPOl#NMU6}r!4eTnj7%z+Q|F$n2uGmRRZGp+m2<wNJBk}VfH
z*$|q@W_3}8Fb!ncc}aj}sFW(>E~2D;P4o1;_-Y6E=(2_O&Xu!3Gj!TtSe*p{D$k#C
zB^hRpc((7J)5^Rp$6??1Il!FVOk9-l2tGA8VFaCVdF^1wu(c}ybVsk#MHj&<s#HyH
z1f4JU*(9S?#_6*9P<MVso?iMuaKzEl!`*_`?pUUlriU3WS<g9ERaTPlHhnEVZX-RD
z41U&QBgfw07WU;aL>6?U74(CAdd*RIDM-6_C>UO*_f)|zi_w<EMNTRJ4)YmruMK@@
z=rvVekGCnD%$;Z1f{PomRZl+lVmQ3lQSUQR{<3M%%7)`{S*kb9Tm9p=F=sY$85ci3
zc`WgR4{8DWL)P)6%wFNs#-}BfLwP4rUl24;cQzyc8{aVdfUGG=7sDFoD}bJz=?{JG
z2?k+?e?RH*7oZixwAIQx+9(a#KG&D>3u7o_aBR^Ia_1dS6^=7x7kZaCuO0%e-dLJ(
zDPCtVzo>mRsy_K4dC=9t-8E#saD|N?BdjE>y3}y1mMkqR1fMsAp3VISIx6P+Q)B#W
z20!i70AaJ}e;`VYTjpEn`r|-!B4p5C4?EGsa5~q1l%oM?GZ{{2ZvF#B8f1?(ajk%N
zRIecrv_NL!qa^-bhi|@Bw=H*v&}GPw9>aW+wA9KLRw#7WNHc9T>0Q_ac!q}uI}8AJ
zS6+aE@PEmWwLhcgJ$KU3KlGscy^R%AZ>qSHRUyycHuAQJ9Hu$htKRDDVST2|bd+l)
zpE@!X{UP~9N9BgiZmN~y%z??O;>41cnNfZ;QF0iPQ{zH-F(-~zI3hC#7*8|sHgV+;
zrc=K%KRF=1Ul1A99WbR?GN$0`CA=OY^IBX|cT1V{FlYnUC5k;AO_*fP*JgSwb4fx-
zCx)O1Eo$?c0a{CMok%3kh9Nq?Q<j-mg;WC2Td=OWPq-^#U$PxE?r*8(#PTgsT@o$`
zFFq6lvMqXN(r&|Fvjn%MVvizGb-pup?y8;LH(eE129XLWBJv51sQoIUJX>OQsDJ>w
z{!*YKUP`gB>gdfr<S0h_XHhnbo}c6$Drm$TPzK-SLCnXl1PZW&|GU-M#B~7F-IgJ@
z>$)Me?U-X2_L?H(uD~09aR9qgyw<+U^YU2~g=ij^muZr0FM4HZck@5p|C+!U>CK-H
zU7`qdEKg1R)lLK$ZxUkslpr)*o!i@f4Txg^_G5Q~FPQ_)Yw^pyY$$3?b0pr}jK2N}
zbVbi7WPCiZ;97B=ljN~XkBjqYKW3M=ZYe{)8-6v#r?i`g_qgmTtei89s&)Hczp~{2
z^eZiTC`xl)YGwEY?b;!dsQ19otG-gU!!Nq)Q6Kf`?y0f1GX1CqvL$z|_TPXiLkb9w
z>J0$#ZM%rwZxa@J{jWlO_ZC4V7t&#^ZtrNux2lolwPL&`ni<L;(_@8osk8yYQM%)T
zDP^4zJMIlKRJ16oB9ZZ!cWRYL*>N~<tZ9Gv=U&TxY?tks9G9#>nswlNJQCN_y1)8N
zKfXBHW=Nh8-#5k%ETftAyn!-Hi(II}-w1liRZ^dA2aImPo~ML6GAZ=sufK64INn~V
z;Pv1uUz#&+(cUM<H446b3$jF)qZ>A-H-Ia9<0PZ6!S_OfwNr6gH(&9GEULsf@YHL2
z_<~W*EnRF^e$DcwG->DQ!SS9+hMM5-08^;S=F!FGxFZjDXp+BIq+s8uT{zbG*jq>L
zNbOktOe7MDSNVu;?lu=i=ao0TpEb!WaC0Wdt97hm=`Q{K!@+!0V(}Jv5?9Jc>6xe+
zmMHneU3+i-PSXWt>d^wVfZD+M%OoBZyJraOJPrT=9>UjhUoryT{%hC+%zg7Sgm7{b
zedXb!bsBQ^43qr<%~e5l5#=2Gz9!SFn1&B*rxxzwkKiqetB))MB{lHv8%K!<J+=uq
z?M?I>!v^bX>JW2fIro#P@UI|S%h|^hnJ0{m2@hZAcV8hRoWI96j80vW_J+Nehmxt3
zRG6A%5lAv7^2dyQkADvN`2`YbS)GyBSj_F~w4OL?_TwXK?Aues3G+BzchrVabx=%5
zkBYTX^ln9}WH&hRZR&+1w${}-B5is)Pil1_!}Z>2+o6|uUunE4uj7{b{U1SKEJa1O
z=`!FH%^k1FHQ>p?HgcqW-fPd@D`_zE?T{(hBy8yK+XTYJ2wfWC+{#Huck%MWGUgE0
zjRfwY)~db>^rT0M!*E6}vY+-w{h4U8F&|aT>FU*5b7WUm>5?YB3hH)cY=(#RTHu{x
z+5d^DtVM$^B!9ZMs>hafC3UI7IHclY@9&%s%h0|p`+~1ERLye|n?ohdA3Txg*j+Vj
zN$&D!Tic~nnHrI2vuHApC|c-ciV~wLzYApRbyCJ~O1-`tf*z~@Q5GeJ4H!`I$b3MN
z9r`41LX}>PD?m4DuE);HTf|h^C6&0tLEqX!&(ks9cdf2P(Y_#(&Pu7SiHux<og#^9
zTj`P_>2KWAhlMKE(K(Lz!ND0MGVn5Y`6UKPd%;XfQEP89)G~W}XQh<RsWM&uK_kiR
zEMTJ50!eTi#233Qz}`BZ6n^32I+T67@SqM{?)ju(A;o}pQdfl1Ch6I+;?SFP$&+gI
z7!RM%D=OaxV2Z2lumgG3e)J+aq~lB()>r%Dt%(9(_%>uNChQ`Y-{2yFj(p)m0~1sX
zV;H_FrOjkV|MK*MmeunYHzty)dpD$=t@p~t$>X>?wEKsKuV;Q4jeMLc=gl3&Y0}Wt
zK}11moELSZDdi_}>W-&#@_X3hOIb{-Mn`-1eTdE~o+vSxrm=FA)mQ+G4dJe)hfS1A
z32(xS4LzKi97~u*14iCEqu_a*)~~avYL%3lyi-hg5zAkS`3>+z;oGyxl<0M6$eYEK
z$!^at;<*JR@eBbqAbFO4!mK?VjRMI#I|7Q2l%({%yJBmB+zTN*;w86=$HYL#WB7Z(
zMHpe4m0Ot@*<7U<U4JGahr@ck#^lUKyl=!-tN1LKZJmh1Q{cVjCRj`G)qY5A^47Jw
zt)B?{vzD1yj)vNAzr(1hA-ur+D9^+iz@)3oIOG1$k3UOH(fbhr`9VEqxgIn09D+q}
z6n@Yaj&ffJzrNM?w8~_<Ef9_zTZ>Og*Z9e#;P7r^YMhxZa!TtS;a-%wjZpb$e|~Te
zk*Mg7Y>oJmhuq~aJkcu!0)Lr?(!Ed5meIuGlGX5mGP}YryaihK7jIqrslXpIt3%#5
z%a$|~(jt6v9(++P9w2)GI(?e~WMI?IF|z?j_Gl6lygg*8J&D>08-Db!J1BZeWA=GM
z#`d!a9sxv_H2e4Jf)eTB-n&wDU@cJ3`I@!p{ug~8Vp&EuNw*ayV4p)uhaZWW-_uh5
zl&h0dE|QWseQcss&BcGOmyRk!G2r4&s<cdg(n(e*UEn&dbBfrW4qpz{6<TK%s|U{m
z5kw$;*LA|*Q*c3dQ%~{*9R84e1E`Lhf?kM6harog%0Hs2xvQwfG<GExidH7O+>E!B
z(ru|b&l&jAQ0CvtyJ_zI=PoG<=syVnkd67xBdj~Tl=S@VXm0!1G(2%RK~uXX(f557
ziz%NNK9Tgl8@!;_sP?U)_G3z{{6n!W^)Cq4PZr|0?R<b^{>=N*`-~J)WKs8vJ?eCG
z<Tu|COua`io<%F3mTZ7s$g%OhKITJh$Jjvxbo3W=l@}9!Io)qp9z!iv=kQ0l&-{Zf
zjs{tM!JcAbSN)!1eI5oFyu07xiMdI_0(4WK<6oVIA=PVLsk-w!+e3&)4#lW(E~^fl
zv#mXA&-z+To|uJuIyl_PB)<)LC?75^ZR^}aMLp~R7m<h)vXm}F)bbM=YkBP`2P-@R
zi816U1soT8V|I0~5?e4fJq*eLfZfN||F-)Oj-nO+Q>3%ys15JUg0^|(Zvp-^{}Uzx
z8g>Ey114%@O8qGc5@2_eRRktDhTgs#%l+`^yn3E<!*5ZE@a`hLU?)8Px%M--9g09X
zxIsGL82i)sY=joVhv7&y6bmo$we%F74*^VgNmGX^lLnr*s<{vVnI6uIl9hgcrx;Vp
zdYed<eI|R@WxsYkOHM<b*}~tOw;o{9&gxn^`&*eMPabQ>rkwUxtDRSxzw%u5-{l%|
zB!-mu8)_jPG8|^^TP1D2S<groZgecOdjd{0lpf@Eo)-?SUsJGUivIIC%+`%+{gS?5
zzNrKnW>}SFTU*ixwBd-Jrukx4P{w(dEQg0OM|uq$4<n#W_97++2(AK%apK+|<23P8
zU=mjpatmnKyro2Yv=;awlQ-os@-7g5jeUi0Nh&TLCr5hct-6C@#`4gV!{NW^uiL|D
z?hki0FQ;z{Ggz(X8blPNL*9jttbFoX`o8~X+b2?z0yC8M-32<|Uv!Xb7!&li1Hnvs
zQiLxlM{D8e@k1FB$@UWu6tw?Xhkanv0kbbN7Ot=q-q?9{fc)J6dTJ~YKf1A~pmaJd
zujRZ_rX>cvPmT<qC@lsU<%JBW-2a;tqp}sblFy#Jz{39h!a?SI#rE>*b!oU@l+)6#
zv?j}vGGdvoHPZDR)g-Hl5jQG|_5Q-|gRg=nNrq1baZ6)f(9I3>(KI^SU+J7n`vGmS
z&PaxnfQYf$=o_*KdU7Av9-Yal_0~v3+BsRX?x3`c^FM{;ZytWxCkLrS+rr$c&E$Cy
zIDGe+;)3U^Dlk-N#JS{Y60qUI?OleaF*@cX=A*7pTj~a%BrN}63TC8pah^OI{XE@M
z3!AmHs;vtWr5jQ!?P-6)K3LQ<*5xEqMHAyrmHN-bsNH?mZ+0Vb+po<Oas?zZL`g$U
zfH@W)!|f-)^T)o>BsvbdLe9Nz-}L}=!Jon<U@mfm_5T35T=@@FRq&u((d5nO9UyMd
z{txuY7}m-}TIzr$hki0<xN(3Li(czg9#X2iO6QK15{Sf5<2f2E#K4T#YUy4`(>)Wi
zrCwh@!V57)KQ(J#aDZ(6g#fI0h|4mgTgwzuY?0!$lFD^YiK^*MyG2{f3uXouJygZg
zGUWVOImhbZui2Bt12fI|mHD`lS+hQ5Pw<qJ7Pbs+$jZy5AzotgxKERUdLzs@*{>r)
zPi$*pIyuZ^b<Ag?iG;l&lOV(P^*&>SFOoh!jCwPASXIHmF*yLYR_ApDPLR<^6~gPy
z>jyP7^)ywzD8zJfP_T`M^N|iK`p<|sLkJV^096J=B67t-@Vpn2jwh^&AMy<rO~`Xs
zl6BnjiWk36?}WPx9JLC%=Ry--L02^w=t;bVY&HYOAM1bLw&FpYWj6T9L)Y@`<gQYJ
zuR@e7Gfz~>{_yy)v81?$hhJ(|?@;PoosK{fgxCcWs4{HgLzfx=t^CE#2P>0ryK~a2
z0ktVQE{*Gjs|i~-`b#{b0?kT`_YNOvkI0pX?Cql>bHG#FhmQ-zbSFs?##<iuhkQ!-
znjy;lD#gDE6fhK4$<;l^^lW+Opqwe4sw8XizTI-Yy2K#^pFFkpl+sN+nkH^gLq_R(
z=QP|40o1hunQi*HuaR42d_-mq+A%ZJf%LR#+Rgd4@s`JEExfC!&^S+@FY`JjQIYV%
zOhQM5kJGO}5dXY{O=e-8cD3}5GCQYmR-Qz4-b)FG{H8K$-i(!Ha{sL6%3=8$G}7S;
zP~6`VnGdv~N^W)OfX;s&fT}{ySZ?L8S3CL@M^@n@ia#!!R)Yh5CtBKFwUzB&Mr3)+
zFKX|zLpg4P|L=RAS5JP)^-57#qBdB371fB^F^1TU25>}(;NF{!>hU?#`F@YSGwHOV
z0aDAqa)vg)e)=CsV61WUM0+dk?g&|O!!(`te2Y3~RtKwpMR=JniG`{HkSuglgH?Lr
zJpZN{#wu3WZFxJ{?7!2SAlBT=Hc}3uFyNu!=r0(r8)x~eup7Z6p<87P&gpWUj-0#7
zM+sS;<v*WWOL7;@<3*)_wEXQXuM#sELNo%<Sm7<y{{yK07AJX7bbkHqsw#&#Vm&<Q
ztR?8iqQ))R>{t3zS2rz*dY_$~JSsF(7l4nfl>E~=aho}s=}bm7+E8EW*bV(wgvsp~
zolwJd1JyU8i<nqCmF7kS;T5e$$@g4H#D&0CpZ`uRf>+=B+sCR)Y;|XFh-j_8KUkW3
z>@GZM_w;`01Js^D0G-i2lLO_jVZrY~GQn|o9EmC44{FruxjE*kv?IPg|3)=mvMYNb
zQ;@RDT(k1*f|^U<!H$t_f>~EVty`j6MNR>Jg9Yf~GK<fK0ZK3}MYP3>cy}>nuaS7T
zac$7MIKvRIo_{EQSxnY(oj5IG`;3`L?+a?W@;tRJ+?o1wj>ma<)J@C650i2Y`@|?F
zirWgznG2*dP{MLgN&juc*)UF<wz@YiuFw0A(L<`I^rHdZv74{2iH0LDF$HHOhLPBx
z*7QyuHQ)Xz>x_$@5xb5TkNx<uSjtZB2xdrj%XjRGk^=^b5=vI>^x6iEducc{RU~>n
z$ujl5z$VPB1YNpctn6x9zJunP-lZDU1>lwj7v%X6WP`rF|0;^UvrnbOQ2Wrg$paI^
zXdr8M)E3J~1{Tp%OD=uom)=tPCbbKNuRw^Uor{u0B*P0n*DKH+N8a2>9wCPCW?Brk
zikOCEt;7StNpJnN9ae`obVOh!64GvL$@m4DAS9SIuOROZmrzHzoA>ENm-6XDk7Cpe
zmVyu(-ka(0k?2SB+b*reZ;~M0fXR|4Sug+fn8IO;7Jl=H%vpeY?-IPmYd~oh6Vw^E
zEiz+tR?zAk_xH%XXmnX$QnP@O0U0E)zPEAhLbr{-{PrbGe^Hs~46EO%)KPU{wwWiO
zTF?u(i!@8O7<nMYM%Q|VXegC7ms5Qp%C-DaL5Z`jNWi#Rywjt66~bWr%_`0~C8*|;
zDuVs2WhId=kOKiTH=Vkl71P)G<z0c$z0$X;pF>_Y^g8Qrf*@2yUp{i++xLP9>S}zY
zX$E-ft^<d8{w9}ZcGz5fgt!IN9DV5JF#*r5#Www1A(vz0+evvD5-%-S3Dl8(iZbN6
zo-PYjv`)G1XKJzhJZv*VzPF<2XNdeHU&*7AHl_MKg*}^VG1_k3{@dVz#iz$}I4%N8
zbgjh?^Lko>vzSgxxC(;P+f_ohH>Yl+AK@{FGslwgbkMw6zbN0GA-lF$54){0By(U$
zvZh{&ek#rRn^?Y72^zvLr?vRqJ^#Y7Gbv9v;G{?tym+#w@7`ge!WTAg<!*|tbs#X8
zNl-Py>3$F^5_<<FvbPkkPK_G*MD=?S<Ud_nJUB5-9FSo1f!yIptuO3z=5g0)jum{_
zd;l03h(IRU_<#brer*I5k@Fw*`s!XHLxez6i+pm7w_qAyW#;7NQxOSAi%)pScp{6(
z?svAf&bw@g8*)&5i0h0vdLnQw!Xl7{OhIfj-j`ShiK_bqbR%BG6RDW`XyFF9X@~2u
z;qYINhns@6gJlWW`42RDaezWwd~zlpIRH$09>=J4x9?-gLHjxvg@595o@b@OR-qkQ
z_&MaB`CLKG@PYKNpKxs1XGdEZ>AZVDcodTU7Z^J*w1(s3?cVhaf#4b}x1DMC+Vxua
zf0~XkK`%m^%pWQE-9E!7UGdy89SYleQMX_5gqzf%(Xh#o)v|8U2aNWY=FCs}N7_FW
z{NKTGRgv7fSIpm-CBG66@z=Xu3}M1=9v?--#HHD^im*E#@$#@tS}T_i0R!Hh_^eP2
zfh8Pdw2Zhb1CddJk|rRddlWO*#zS*n9wV$~5reHkR5!_67|!UaO@75K;?0*U9V39{
zA4zQjWfafnTIFXWjMMlbZ{aVa#FpYPie=KqV-gufmTM&hoKFWVh=;0La_rhBf`czA
z-XM7_f|YpL#XN5K^1?KlLuD%n#xFF9NA}}h;q_1PXxyc{WZt>hm&;HeV^ZujKX`DL
z%Us@aZhU~bFDhkn*?I+KC9b*B=6Ahl^W5TI4EWmUZ@eoAQkoH4kL$JP@#R)oUX!P3
zUAWhah)Jb@xpQwFJ$^Uz%844bjIrx~!8SrycemW)JYAV@etv})5o5?h;}PL^+Kif9
zEB+Q?R*}TAc7IVCLL+PEh}Wv$>{K@)>K~e6t*y0K$p>1Z?!?v*ceQ^u#iwJEH=h8)
zZ+%ViwE{S=pv?Wn?h0hiT!fW-ab+kh-?r;i=<}bB0u~8fS!t?J1MJK43*Ox3uhO@C
zdLpk@&w)1c%)Nz+@@8e{ml4cv4`F@NV(qJgV;3&GH`G8FNVz?%T_{kf$s$-EaRx7A
zomZ=74$*U9idwS!#pK;5ZTE(=G7apxS=r6I_&<+9Xy3s9I0Z$SQWV#j&A)wvswBD1
zZaxl(1!A5O63JxL&Sn4?q_Z4R@`k_Bs>i*??_ki{TKok8gK}cD>Wky!A?dC%VRsXe
z-NSahqI(-@@YCb=?=PZBjUHgB6=xi;CuVjPS2-B9&cMaX9TVSL{#MDoY3IkjfDmo&
z*R0g-q+ATvb%A{oZUw2eUtz@^Z#SD5Wp(6AskTxkx=LeIw!f6447A7Vo~**NIF;V)
zWp@!f;=7uXF6Rc5UID)TRjAX_3UU$4Ugy(*+L`Qwtxtn~-K4sc0d$<<C$B$5y8_3T
z?kmst(;@1wyJ8eAcoQzGx`f`?JRE!MGsWacuEqOQ+_^cpmhgJU(!0QP<uyf(t1L`P
zc^y|Jtq_0xy!7b$O>H55k>q^x3WH#k<vG>Pt0dV^8#%v?=+>DnUM|a_WTxQmLUE>&
zO3D`YIg>)Px0-PSXWD&T@}mHqSWQ2zGt1YeSbU~O$7R#a7vZ~^MMQdHklLYJOm-0d
z{ZA$7(t*$uTT?RWZt<lW8@TaQ)Y3bK7%2qPkp}}x+hktV*}cio@XfZgz3HNVEH4|g
zWrW|)G%W)1@#Ne&#+!BtTYW!Vfl_aRg0=>4lBHd(v%vQ0lA@cias}HU?!juadE-ow
z3d;GCFwZ^033OTRg1=HSe-uGZ3rqH4KQB{FSD2?J>UX7KcrB-Goyh2(UqWmnZO^xB
zK-J#{J0{jVEnJ`4kM@h?>OP-uR@}7vY%bnROTrPFyHI`tZ_3HDQ7k8OenMqv&!>BK
z%F&Iy`7M+xJ1RC}7xQS*#z&UjMNqkcKJ?d%wFtKyp`Xp^`vntt7<CXEc|$?K5!gHU
zm;t3W3LFhI_>$z!{_HX}yC4mq5-`lWr672oNf2&onVHu^m5INnAYHhp8pVdg?yMl`
zk%!oleS}ca63^=)ybuD7kABv{Lh{%l*kZNIkxw9qu^POL?L`E73^zb1Eb4qeSm7Rs
zwUw?J8GMn)y&U)?{2fpLlV!v4K_e~FVJf!N=b89Sb#~Tq%)p2kI&mBlPB?{%VE$ra
ze@3rjWJOp{CwZQ=xI9GIrN2Jy6$@D~H8sA(R=I?Yu;Bp_Gf@|4D{acY<n3I3{PQRA
z5I(dH;xjseuePObe?;W_&_G!ONiu7N`9Njfx9puF;fbLmgw0JPm%fTqQFffF{O^F!
zc(PKgw-H}e>E{P@o=+ge7P-#~KuLQOxLFhWGY6Mv&lQ#aDi9WSN!v=wQ?yYAvbVgX
z#jr}ONsX-Z{39~e7s?9pfe=2=#unY!Hua(ER;G(NRrF{?`Xn5#6)(X@xMxHoG1A5s
zs_ORYwqKWZX+(13t%UTq>p0*Q@IQd&xfxzaRrxc1s`R%u9tQ;w4ciwDO&PtCg=PBn
z5#OGiMWB$sTlDH<cHikQo{V((Re*gVD%)H*^~f@^grLV;bT_Pd4+#|(0^a}6p9u#b
zX@vh5rG8SYj;I`{CNj(M$-(>VvGu4^=tc2mdCWlb?4v=9O@-Zt^_rjam5=`az?cEW
z&OeO#2EdpjyVlew7_4>$yWw|s)g-?^j-o{tiSPd5+5qmS-vHq@t)-O%*x@}LbUMQZ
zrtANLyF+tg;wZ7egEd`vC>%rpaZUTGIUMocpCj7uWWH!6c1TugAF=uA|GiHrwC%p;
z`G+$e)a^*{X%<HI3E#@c^#~Lch-Vw;?!VQ)|3d$e^d1zkqs?xySdRzGGo0Lp@^13}
z|6uqo(@aD-V!wlenhD4i_H;96h!@(&I}hHIj_$4GxpG)Vq(|xT{V4zpbymf3dhho@
zinOriJz?nnUx2saN+)F9-BP!YuzwG>cC}Rt(VV<@lAsn~qA<(kVIOZ>;K9)L@!{yZ
zgzCTN&d!q6HXMRLWE=643K^e6r@d`0e)z$hRwcnW>8FPeFNDddzh*W3HH!kj;f#ua
zLPZOvLsTOkYIS+0xW=jCWh|mo*97)zx#f2)Ql0m$kDYVyX!Z~c2xKS|lOaVnjLtw%
z#*o>4#V6&`PPQa4R?LO2nWs!Z&&vb>L40T`G}0_+AAa;IJMNZV2Re;FPxJ!sjx!`=
zIx(2H$I&q&oA<8{mDVMrFHHemdbhbV3NsIHy5J+~SUG0Gl800KONsD*Nko0bgxtfP
zAXGMVb;H*=gmSqA3u5Iu={lY^T<+A%poAxvxRRd5$xFA8(z5pnKzTS*YLIP%PE{-j
zWf7(EjCS9ZdO#JYUi&QnKM;o5fHNe?&xOBV8FbIT%5){1nP82tfa!~UX7l&~tB&=A
zmfBOh{;e<EQGsVSPfbU^f4BXM7qsD)_2p60h5D;r?D7O9^$#Wxg+u9YOE@yqoN||o
za$onGQfH2$xRK~%@ediYVx_}A4>{SNht5x-5|sci%+#uL^i-<jgZ9g*+PtBtzVQdS
z&+m5}W}+uH8q5-HPK4KEa~8U%&qP<`Wvm>}bqC;inhq%6kdDS$V#yXRIe>T4yq?a!
zhNy#c>wg@yfCCa9A-*1@L!Dh;)ZYoI!H}^^K5+%#Ya$H@>QfG$dI-eEQ3GmhDR+v5
zqIasLyU`0n4rPDMx}^lU0@wYz>lxJaCEM@s@>!L>atB6O+j#CwS6Th$+n{HO{7rKI
z4?%i}Yf+UW7k~eV&s4!LRJ{$=Js6%N#5m99&rST(9r#I!=e-RWcVsbXK)gHt`*%{K
zEI0WvKQ6#neVYCd2(PA!$q3bIGs9(ZWt`a6k(xpcr|7(nLhm2!jCtp8-a}E1fl}v;
zUu$(tt{b_(ZU^|`d9I?daZ-V?PQK%lmWk%B!*9dY&Ss{o6JK8xt-02h;c^f2zltJN
zBxrgfpj@u8`FbNt-L0<e!*`)ls=E7~2lM=D5Anpd!ky?_utJL`eVa+F(}7?@WVi-@
zI3;yH?y)41>BqR>kt1XpXs7)=6GRG~&S*KQ(E&+9b~N=(figo75g{_^)1j^NsHJMX
z5NNnPok|Gfkwdkrg@=2}{wg@K8#ND9@@qUZ2^u395?Gj4$a{F>k~=Fb+u?&0BJ8i?
zA~+c`Gw(MbR5Yge6%V6iitV1!PlAxPxwl%KIfpJ9GVwT!GLwyTMd^~8ah|OpNtrYn
zLQ`GEo6QKc7JeZ5sdA%&IA$jkX(Q#H`5=huXd*)AOW_|BhFx(>_Q}WS3|}H!vD`kX
ztIP)yI+=H?GD|&zO!Mi~l9L921wja}f~Pg~XcF4PFiZkex$ih&mlmlN5({J+O0)j7
zd-LT=O0L&BU$XyIzJvcyZ2$G)oP|AX<>k<fg6NRALjl)N8Db#75t-shqbBknDE_Lr
zzUf=y4+-^xo|h%<%gz~#(EXKkt7V3239fez)%Qj^T)xq}u+O3ZrwB=RDkh3&@q5-~
z+}-Op{Ez0qC()3ykBa_GFR&+XuhwcxE9kjpcDMG~`E+ag0vvL`D$=JxIY3pV3xlP5
zL^@1lXW!I*N0*W!>nZ|k6S4?DMLv`6nzcMFHDYd<78>{bTv$yQEhgPN#R~REb(HwT
z_wZP)bH2BZ5?|E{`emHn!R6I|!MZrjy4=Kg335FzDz_1k6j2uI0aWr=;>BdnCYy{t
zmnEDm%jwNTgHMtd=;QPMeqTM=k)_GjZs02h2F5052RBq|5&D#XE0}-M;Py0th*M@K
zKIi?^5*)SXgLEQj4J(8g8Ji|@YbKCuiuaNO#}hVOGcMxq6E2i_!fir#1<6m2BAbdz
zJkUIepk6qMzmY3un4jLOA|qYemKoBzFyH#@s7peBb!nZ7;r7@MUZ65K=|}ryc5`sV
zH?xaCnVf)nOD5jbL%dn5lt3GF+v+V|p7f^f^t|8d!`V&vuZz_b#Lv$8<<)i@tq1)q
z?ufJU-`wKo!!%;n3G})wo3qMzQNNf{y2$?nx$?fA>T6%OR>YX+h;aY86*3n+CRv_(
z4ukX({#CEvoaVJ+7|3OswA?Wy4e?=uQDA)FYe}2EpWU}tvE&5OwB#pW7sr4)5Z-)Y
zhi(71!aZ=%=xJsz;;@n&cbo5)cEy%qI*{D5VJ$t-R+NdfDXUdZC>J0`W-^Jc9$;at
zx4`%wuJ13VxJ(Y)W=H9I*GeJAOAWFP8~=fvOP<_N2oH;=ik<$$553zv^V+>hv+~@i
z9B-)F7TKk*z=jkRmIr+Tq=yZdVNXwUi5Hd6Z_?PdSgAPyScl{6;BEb7Y?k=@a1xH3
zMgdXx6;k!07SY5W{FqlIRkN6DYARIl%$`Xi<&OUcilNWWj5l$&WsCZmv$RG&?*kHI
zm<=WL*JnJOsSj8s3+gpn%I-Gku<<0lO^XGJtCQJ!Ou;ER1Qi8~5!^$7bFj<ns<26#
zXj5azFvTLX+nSupw)OCUBsP9-t-j$^+?&{<{oO}z>e={jw`YANCQ#{As{!$*HXNU~
zY8y`TwaNm!Hb=>?hzq^J=eV`PUdk<zCx6szPJl3A+Pb^Q@iteNxwMf9nQ+Kb`hHn1
z)S)7A@9YVuwAhD<3cJN$A7gTLBC*qyV(66q$A1yvk3~<+gRIQxABIxBGQt^kXKC~e
z<rYkfi!+mM32BiyS;cPdz*Yz0+xDrktV@YUuLulxbbCpV{zVx^d*M}JDC>0c_bqW0
zvU&Dg_2P|r^|_YJDcavYOo<qoO;Y;D`9iGXqdTs`Mr9+_fpRl>!id(VtGhB+pzvx;
z0E1pdRvPSdhuasgRsfwGqAq0AM?(&A$9>;Ln|=T#4Pls_Y3L7TI2JB1EW{&UbRNUW
zICtfD$w{J-K7>qJE_fA`!@PXD;p%Hm7L?mQ8RvNlvcupZaqW?E!0_yD(GIdg!LdWx
za9bD<#R;`TrC}*vM5Sv;oG1#C=mCGkoM^)c6Y4HY^)Al)ZuTGSwN0w|r8n9cVZNuC
zY+ATA65}~a2D_%<y%e6w^#&>#O~iq=1*E5GD5~szaqW=1pYDJ!?lA|+PgmHDHNY=J
zFG`##EX=+qziZu@Wv$qfpLNLrZO<@WXUi<Wu5}55*gl8+(5#NO&)&hX3RyN#=Ol}3
zkEYXayEa0OPV%9aW*(>QM;mS=3uH@wK*X_j_gd?G^XrxMhKi{&2^nd`zxF=-bOk*J
z;=Ybu!5p*yKok7`f$+-X*@*;j;d1IghwD=_ur@`7W~z>eJ9^l@??`|iYO2RA*GVt}
zd9M#>1epRG1i(ZV%Q#3Ww0Arm{l5EKVC@@jUgBUHz);K*lJIX2zc$HFfE~U=J9buP
zlLBIZ4o<xpzCEnmS%*i0Op(teCyj=C*+&srG&23!7P!XzYRZl9;pn;@X>vwm&aW^I
ze#mbgoV{m|<%0h}mZk^5S3qlN&tTCR9dx6Ipek_MP?U1>E&UHP$aD@MEc`WlKb<!l
z)JtZFAGy7;nbbGATd|0KEQ_K>hYwWWO}0`mB)+_uhpfRiBSMgeJme~&(q$8_sQEM@
z`#6j<bVU)vX{kZDPnO|4+U{zgL?4C>=e=@1m;04>F~0(wM<Xs~Pds#Dy@?|ypT=N{
zTSvo;(fr`Af)NMr7mF^-^RU)Z0YD8})NXo<B4)FxJ+#7YFEi$~aQElbmOd00VPRiB
z)XsFlwzub?C8svYS*IY)#b0w~x>b#YKtLwGA7zyF3C{D)g_LPJ?jcw)ETZbQ^=1~w
zm!V)M)R$Le7o(66!|B7oX1F7h=ZObeAK{Q8@+m9qj)rBvm;jgG3~@2+YMj=)hW<Dx
zIx@==yQo%v1W&4fBpB@8NDrG$WSw28D!s*JPN$@+eEYFgXj?)#O|8kHsi{sclY_7T
zv{v|}vD?J!)#l|A4`Z^B4sqmOZ9CTlj}%?;A4leOKw6L}X_AH$_b$FWR(Uf;4h%yk
zAO^hOeXV~~B1a4ah(1nwK&`?Ie(68W#|;nPrcQODW|gj1bZmy(d6MXo(Avk}jwwM_
zg>!HlnAqe^M(L!B_+!%Iu8|Ec-^JfeW@6nk*InhPg@{dY#lW}j#ZBLPe*gVJiw+<t
zD_kXcP&^OE_>}t`G4xGS!T%MO1T=4`#{U(T`d67vy8he-7a;Q&b9K1`Z-w496F@nl
zL(bP~!#Z2Zy27#2nkTAf?|s5eY1G?bu+BgdNt$Y$uBcx*Qf~R{U;bn=ykY~f(f*r{
z(8a!O_xdQs7Gf-`$Vm{{3kj>W-jn+<dEQ8LQjL$QbjGsOzGP_fgZ-5<FbsdeHSZQa
zXAuQG*$@`Ejxo1RDo^uvaW^a0g`Mp+wOn3cmYfYCJv0|+Lw8rEyxfhE*R1A#Pcici
zN)Uh4{Rs%~e0%^<mAxp=X(T?M`STGyLG16G;+Wjn&pF!0{yKDf*FTs@KDwVAU@FQ(
z*`1fhgyrwx`F$P2Aw`gje#P?=Yh^I%G@t5s*CwJC$AWa<?1{oq)5~U<KHLV|FMGTd
zN`*m7o%r^6ke+Gl{2g0&)iA2MnyJW>to%(|Qn>Jtq~!v`$=ngE88mrnZ)x;LWW3tJ
zfXl}Rr^mW8DgOgAA8)*mcdKRt14`$sNsOxHAKP47${4DHw<Lih3$nfHZda!gCAndn
zbRKTyzIfh@mMOWa?DLWBpMo6+ISw&~|A9*FW$gXH!70ntwYHAbNm!%aA!yP?v7yHr
z3uD{nxe)q$?`hb~a)DC22UMlfvVWBPFTEm55SbC;XNhZ|W=a^t@$+{Liz*!;5>19k
zr+Jnj;hsg^#n5nOYd@+5-UxcJUI@+M!pk3{jc8<yGnF)g&NXeKaa3#+8I7Ckd45q4
zB}p$ntZe1bY&%PleCZdYl<@)2r%>j*GD9H+dmk$#DbLXV)d1i+RBjpddfZCV)BnMq
z>`NC@=TY)J|AvBW!9xD8gS#!^&=@|@jR3{Pyloig{;Ni&U|4TPri(aXnbg8!glODz
zH563C?y1zf_nQ2w7rjskLk6z;s^_#`9Wo6<@1JJe6M}j={#9{Si%~|=v2EKzh-qBD
z^6`g*9O#anG>rou<Sp#MvNGRz-K&aP^3_gnvCNYW+c>oScDc9DhC@a0PWHVoS;|*Q
zUoYt7LJu__^_z#f?jsUzI~tN<VwjyEGP;vtQAbB}m3XpnHJfcoi)gYS1k-6|v~yYn
zvYsI6i}R$`3lTj8Zl)C=F)9NUWNuO5;K}^jktNaHQOVM9BHYq^cuEW_w|#{(N)X5x
z^MD$RKDDDT6&1tmuDD37WqWlnZLlsUlWHeW3z4my<?|30qV<yd4|c-wSvbnGA!F9!
z=cp43SGq9cq9W%HJkssSr6oXN{awW2AY|neF#86Q@<{YWCl}ZqBQ9(ew};ku5c=2&
z(SZIcQ3!{0!Lo_}mw$K_o=rpHOYP<gLS(L#&jdKlvh0&HzWRb#(L=C5MxSswGJ3YK
z+<4OqcR_Q#AKqptrSsluu3JalXU+YlXMAA=47NN*Y?%tOyCC_Q(_%G!jvW049cXr}
zKYBC&v4O5hI?u^FN79pL+==J^u)<OYLw|7LFnFk60~6?<-f<$Y?wO}XEuOqnd5x>G
z?i*F}#d!&aQqR;tl+b=3#neWC-Im#I-$?rbFU6y(ciOG(`L@gUmWL7A?>D8@%Ri9B
z{K_G@nQiVh0jETZtcl<G%p^Q8k%p*vC4_CshYk5eOG{s8!X1$<JuFUQ5D}lvJfoGV
zw$#GPMwl|>b^Uy_`%aJd?xIR7z2?wlkvkC3N?QSG`0z6l%P~S}oF4s2KxMzAm=m?!
z+i;#fYWVri?jSzmwC*iDX}YSrV)0+;T^!n(@DC~3JJ8_H{tx8jq*$w@0eC<->!@`q
z;>>=4QRf^8TqYpuP{x0Je~t$z60l>j1^?XJfA^yzXgqwun+>D2rp~uJ(4%S%%6eI6
zsg<ko;wl*b?BPLE>P#DFJWb>p>>5C-&;^fl&aeTrr{r&W+^Ii|>`$mgK;|(eX*6Z4
zki9biLt^+VH&p@I%7zycaZxHNw$#&V7T6W6;cRXjxEy<^RyvRU&H$vdWIa|;`*%=p
zKq~HJC_CVYVSi4G$7nQ=K{(it_AK2j`)F!5p<uQ^I58EB`x|;<zRDSWyqgMtmfQ09
zw=z5HNm**ik0%eFXyGnY<cl-GCT^xhL$_`z*6Zotx^5hR(%d0;cD0R^(u7ulbj?bD
zPJZRZg+4?F+F8}xqUvgYqYM<~iU?XMPvqDza@j+k3crW0u?)9>?yF37lYbon&mro^
z&ecH9rUHsdFNz>_pF03KO$4;#5huy^IFBZ#8u};fdcuS?FcM};dQxm(GT)A=hn)~)
zIDTgSW1Gb~nL_BQvc`6_%71kKX2^x4aJe?Uz+6+{(48vc!%tChFxkzNW!}3?22K`e
zL-&-Il&Bw!YybA=m%BM;D>Sll-gh<wt)D<kz^KW(g<IMGt~-Fj1=Ur5svKmOvnWcz
zofuL)*IeV=Ey~Ln?ZPp%v><f;6W^c3oA9YAF2(n4LY0m-WChA&iCHn@u(9mgLAW7?
z?47VY718}3waPu#l1~51np%!|a2*`~*_Ay@{&CC0De^;RR8&Iyvz*L7EY}W}17|%|
z%vhFET6HyR*xVrff`aWHK`tRT5$sBy_(_$3z0(qNB=OVV`J#NV!>yp3$t#ImkHMx-
zLpHzZnqjMt49M)b@&YEF1W)336v!7W6^+h+ScL9G3zdd9v;H)!a>F;}0v`8=5oDU!
z$x5IETv(-5TXtms@6$GxvoG3(eTM=tC5xX#`)PIt)}H?P(rs<n1o9-B&s$fc_Pkl{
z23U8}IY+!$9!e>^Z{$b&w?7Ds|H_0u*ZAY1RJFsIs8?-%8TSxV^ruwLaYep<VhDoL
zk4oSbPvL^0t=M7NlElBF{hyK>;l?NS{GNU^`irv88iLZ)t8{E}Scq(g4EZ;ujLOh|
zc9z&G-~?ch`!`H?PNToatw>ybQ?gB<H`ftZy%VlC@brM@FyUw^nU%9y=!#Iu-yH!c
zG}`xbc4zax{h@B3JF84hR^Texbw+J+8`Uq^Q=niRIJx)NcF^fSGoEK~N%5UbO(Scx
zi*%$OJ=HG->P-xLSQnC&=&Tq`WbiQQ1i`5LHbADcD4iN%R1T5Q+z-sTbL%?E{^PQv
zJtNMN^KqWw@s#;~P7F3q^zK5%ty2@@5AYBEI`zp%>u;4w%_22MUV9t_FBhfribFo7
zV$nIbf()l;z{MJ!&D<zP_hFuqDZ^WCC&hUd#O-8`8j&33)2+E}+>tTDA$dcG;uUYj
za&`Wi+7D;y;&i7|Pd^>wq%~d*OFmevV^&Y=XV_$awA6I6r2wHR7E1kc`yWUm-uBtx
z2tZ-C6;|4bEgQ#S+XOF!{ENo{{S4G>k|p(Xgi=HVru*<n_hpN7IR)iuj_hRl$KaZA
zhF&;r(e|!ltr1a<m_FJfd7D(DZSvl?#XK*d^)44F@+Do-(OFl3f{9$B^h8JpuQrr~
zgF`WegtJ(J&piPwFY$Yx8`79vo+|aQptnLurAtbX_(J&8;(}%BcTGCa@M0lVU5smh
zC59bZc>o$s<-I!x<nJFYzV|35*lzm1{l0#&r&3)eJhX64Y5q&Rp$>Q=mBO&M<XzJ3
z4DCO$w-l6@boH3?q@4z6$n$YVd_N?t8yaFL!gOP}Y<b}a$3lV42bph*uh)OTHZlC^
zF<^P@@-hNQ$$B*J!W`ijv82HDte4#N-Y*GvA)Qg56;CT#q(gzbAV4k@$a^MG>+9<K
z@d<Sn4~YjQo+m10BY(X1X~$QH<4P8`c3CD8z*`5E`7ap$DmBSh0khF#|1Iatf^yE>
zD}#XrdWwZ!1D6Q?s6o<*y)~U1IDOW4dT6OdxHn>Zl-C=u5z_G`M^qVhN6`EsTUHU>
zPg=l)VJG<s5?f*Yw;OO7U@bs429ha@gN_1akkyBn5sGtf*BJXK$)QJrw=@By@#1@D
zL`RUS)x9S3At;cG1OM!mxHE>oivMn2W-#b)-vSyX$M>){L_)Sfx4~F614^ao&Flt5
z8!Nn+PO}IdR2KZV1UE0itM$E6CJftFH-L5ZTzpQ&LV@lju!YWes_{@9U{gLSK>}gD
z&4RhF=hh+Lfh?6E24`z1`VM%95J~6%`-PAatCh3=weCn7U)U-nu;3vpp(O2TIpUU{
z*v1d820PBh_I-nr!;^u##7`Io+D`mUSb(DkczJbD-lSzOL_<+a5a2sBwT{ma+#4Lu
z5nrK2<-xZc#4$~;0&*~H*#6+#Qybu)mR&H-A@`@pe$)MZhGt0js}_6phUSGB@0*)s
z_?+jHD))L?ljva<NEz>?^fY8r!N0q9@}52jCs3?cj6B47z6q79BjZz(pc0$1yKuVz
zEgGbw--Db01J&NKN$Agxa7X#)5;T3yJl|7>z2_*mu}q-RKis_QazG;^MUEgR2dvXg
zUt3TNA<qfRA5lI8BQ720{sSd>i(TeoN18@4zN$Bd+83B2=?2WFO%mNC>_{`@w9~xX
zAg^U6QNpF%DkQ)v`t#`E-pV9wpw>B~fe@LMreZnt(m-x>IF06)U1@JZEnuibX(C5^
zxWqz0<r>H*cocM#5~o}V^eZ-1D^ErC5zh?iistL$iq#>{7Elb`QM7vq3c&2o6Kqe>
zpEAoxl(YYf3JlnXsgT;&yJYuu-4z-^%S6~xB~6AS-1-!R+vz%+{tCZj-Of2L@>E-V
z7#Q%q06p$J+jA>G+US4ntwXw_-J)Agq`qkxnawham8xwT6mep~!_G0BWaTAzc@iE2
zHG^@ds!^)LD8bL)ZWnm}13fi}(`<K+!>78Er^t23haE`}&?!QIXYbo*8KbfhuRj*R
zhB3JZuRFY_L{I2{)WufTKL7FKb~2lFbfrhRaKBB#_hZb%XVM~<xu=V+o4RTadnuqK
zV#v=jF$M2I&qBZ7(*8v%d<<T2J5aO&9=(f6x$W{>JUT^{PcS_AmU6B0h&)I*>ni`i
zozBjz&}R$o<0OAEijw_49_r1gQ>f2eaRN?DA?65ZABh`a#ATzxw(`dzzcK`pka*>n
zQ0#K3@4h8=-~5R~-!F2CTA^ShY|mp8J5W(%lG4JjTBOi7`|c67mT%HQnVfW7oa%)r
zqhggB<D?AP_djUo@L7?CY@`xpMe(O-Wr8P~AJV)+<MfSgD7iBeZYUH}ZOunG@5s+K
zYP;>i(!L2z6b})qk(TK(wfoo-f-HVU^)<MQkIKe(`SU|uAOj_q<ZcH_T+-_TmQKKQ
ziJYAt_PleU4%@Y8n2)m%NMbFJhLTVSE2tp->5DYmW26_xwv>QhvqRd)rj;O&^KRK$
zhkb;d+67Pd@qyTRrpc42nlx0Dtz3?+q|=JI55aS>HCShQJxp6kf#eg8v>aHMhVd;~
zv|^rekZLrEpxPUGYVi>c>6_02xc7`$rI4TTU8e)9z`ouc2p?}Iv4xp2T{ZuyqTHrq
zSNT9^@cE89^Hv`HN6i=UF$iOjdS(Sn&ZqF;TJ&VA3mbUs#bd)jq!p0|Ijj|dOy%-B
zQ8VE14*{tFQbi<=hQTN@i7Jx-4&6`=-nU&oM>b{>th%qoBvs&}&I;Doq6%)tUa%#%
z*a4{-6o_0jdZnQf{NV(0;4!`}x*ZaENh<Ph0FkA<O^T9re5Wpno8onm`D!cw5s&xQ
zMQ*P60a}aS$!9I4JHkr4AMx1-d93MoJBCMU8ZD?maAX|7_lR{FfDXt;M7DO6V<Ny7
z@X7U$X*dvIpWf)v=j1)Vr-=s{$@_E2?gc&=oepHQQUJBsA*Mp@1RXvr54V_K^_5sD
z)_l1CVR9?n_5qOMaoi>1z!yVUZ%Y*~a`tx#2apxW*LQZ~dWh@s8p|s3z+!G$Q?A$m
iNzL;@>pBo|A8=+*@Y!9CwRe&t0qGOzmh9XA=KepvLA{j#

literal 0
HcmV?d00001

diff --git a/fluid/object_detection/images/COCO_val2014_000000144003.jpg b/fluid/object_detection/images/COCO_val2014_000000144003.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1f17bbc667d92dfbc4c3b4f17fc7b3715e7ffc1b
GIT binary patch
literal 39911
ziF#~=I>AArU3bPV1ar+G_X8K(p|_s8GA<D1LFJxY@CJ#e>{DWWUo=RRcM=Im^VPFf
zzL+HxVKRG`>itEr>8^p9oNZk=?F(SH)zf{7GAzc7NICmMva>$=kb@dkFgu^mL65Zl
zM|ZT|uVdwOC6%pB4M=xo^!kvJxN0!cv>HSH4&{;#Lvbp`K<;V|FJ{XpF9*@|sUHgt
zpTxL)6PF`mGi+@gbYI(WjHW^G{~{63)MojB(&T_w$Z8D>y3g@j1cM~R;&w2O-4_6^
zqi_CwQu)U2l7e1sZ&&_lIGCUG9^WF!*&3%PK>Y^fPP5)VS$6Zu2?zPSv=g!*kiHdl
zuLdr0HRwHm&L*!)iK6s$fQ|2)2mMX^p?%=6KUr^7XGl+W7Zx!LMuk7hC6$++zmv#d
zs6$lus;{{vbJ15wz@@Gx7KE71(4QBqM*cU%VMfpCn*h$VL*A1^FvjgSp&1PwDL))6
z9ARBZ$$`+IFg;nK<iT?n41WTnb&*xuQgM=T)^%|Kc6oh=|0T!p_Q(?v`Qmxf<H?&t
zk-2K{3kI^!4bj`1o3d9Gz_?y-wcb~t9^ImqS9{k-8ejh4Li^=+TJU?^Nq!YF)bFqO
z@I1J<lxyI`mGZs8*FL5qS@y287yN|?#*7*$pmJ7Li4Y3^y01>O+0<ql@eaOTZpU5#
z!W@>k`48U0NiIgbd%nvb2d7E2!o2&N=G8M5(F9xPILQS>;mL8eKjTR`;g0Gbs#x0W
zs2+ZY8J<)sNiak*o_3Mypm|i#om+(}-nVL)9Cp$~7m6o1;?y&LL7Bos*G8|s&R<^K
z)UPyK`h%CDF>z?;bx9~5t7KV92?q#(A*bn8rA{GuG_TT-g~1!Y{fk0>$F{Ne>w)s=
ziB%n@DrQxmOt^*EmuTTYy~qn?aE<De%9Dslu?$APe-<cWwldbHXzGKp;iyvu_HV^0
zkpMM*-^J{ypGEm2J5{WRJvg?uERS<p7Vmwt!Hg=696hC^##i!Kui>hJstT(9nA3K)
z`2lj$(t(O?co{u>Ustk&O4mR;zvYfG8#rlZ6)&k~W|N3z7}8=`SJi~y#>%E?%~k$t
z#M$ACE6Hdd1E{>`ApxlhU+4$-vtW7t?sPifspIBMgKg1DFVZvD5fr9q#GPoFL31t8
zR@@@&pX%8@Si2qcgOFHoY(qg5fkIxKApctkB5;H$Au2=nPj7iH_vq<QrPin&5@v_~
z><vTkfT5_yBIrl%d(gx;iTsih<>Kitf+?GjhY!~6>!gn#=RW_-t7ZT4Dw3MkreOy5
z3Zw&k%~3AhMS97i`5%P%8nuG&E$j*>Vb`ly63&~+h(EXeE18QgP-~MdDJb==iHLIu
z7eTf>6@1N&;fHQUUzFHx+)&vxo7;}G4-;Y@%nj)cb#0|8kRE5z6BwQvfx2Gc436og
z7y(uqN&z~WZDCSnDh}1lwSJvM0QTQ{ICaDIc9+O&MjHW6%$1fz?eUme4X>~$E;X4E
z^aatImF|U`AJ1+G_EA;B>O{AIF{^dIWdf7G)JJnVNe3{I*ni~)r#ZRHg*bg<Jf-8d
znZ2B8pUE5CBd*`$QM%u39JT_v6}i|9W)unp7Dao6r<Hm&6OUN$587^cTNR9l!zn7u
z!_AHoM6lZnpn66R*#b?z&d4F<NFK@PiXBvQisRm}Wlx~ae6|ZGO+04_TAtA0Jykg|
zyr;?o|Ldume~>pdM5AwMe2<fo{N!lN|Hh)HRXFF{Mw@XNK}|>3a6?>s80ZDCNQ|?$
z+E(jW3c=gQb^R^jp-P+Z%5%s}?i?%&SpfP!+?IEB@;At~Ew0}ji@8Tg9n;ThmAJUF
z(fs0{^goLwR(IcOgj$TMjn*{7FZUz=*P!GlCsm?17RBREW>(hUqX|8maJzb604p{S
z!|^F0^WDMz=u}6;`59{Z)vN9(jVz4G{-TzrCZd?@qkGl#YH~R``4EPBE*UipRx1;t
zTJfd(*cX6Hxbv(^+Svj}ceCDFAsV+$D~7(02NCJtCObVtsKjkG1IZV7&wB{TTGnD$
z68ZcSKcM4H5sbj+FV53mMiEECCU?ig>8Ehc7r?$yk){QnhNdXizKQyjO>)k6GHli+
zAor2~Y~~C{iN(n02~ANqp*V4ww|+PZJ*K*N+cDlo#!h4xj*1X-aFV##&k^E&q>O~e
z8MBQiw#cs6(UKIkt?XiqzJzF|H$_f~`ka#zlrp|kE*ya$x$+~#!I1MDrJV<0SR-FP
zqpQM*8Z&!YtKU0E)Cx)DFO+6;o>q6^-UoS{QpGa%oWrIulWnhrV;=_N+-gPKRob?j
z%i8{=9w#W)&wTGdDq?{36Mkt)+)<&s_1&d1<6Mt=-sE%Nto0<_@c&*Q>A(x;7j^!2
zx?md!ag!=_f_)0(=Or=xz&{b`l5@}KL}sI_r{gNJ{qQOoH^l*}_#=5e+dbRll3P^w
zqGk_Y=udmslw`?;t_kuSgem8HY^)W@o7ybC9C#KIe{-zxW6}69n=jVOJNXe<Q*9MF
z6wy9_|Hnrp$wo?AJ)Yo$?T%7GzFCW=8#*u~J}|&ny2@d-_&!G#ws1Z@d)QK5hTdFv
z!%f;Mc%vP4T9+SnLQHMHKSV*q8ae3o>Ohi@FH2HK{ZbFxLN3#yzB$y*NUS>q9M0B-
zGnCPxHvZbdhM!rlcVly*l^OOKNYVPX=cE4QO%Z?Yf|Y>DPwWu%hXfd+>m#mCg%8J;
zTQ0<4_`%gkkVH<IVd{Kab!Ia+c4Fi;DIK3%Tx(Rz85h-|Te*vZ53{~9obe|(q{~a|
z{uy_$v<4^H(`RCwNY=kO<Oq+QF`(3DAyDE;P&F1uyjRXJKe*vjpumktw66>4M3&Lw
zYr#13-xX6-HNZ59C!lJK6eaxDQ`5ABR;X~YS|R6x)L~TZ@22E;b=h^kPe&lQOWuVa
zSp?XtXqe1xXlT0p-NhQXsFtB84CAn$`rfBA`q(%0$EZ1gzcD*v=&LLJUOYGu06pSU
zLxSOX@exDYpRwlBx=Ie4U0uJoBr_$(3)v&NuU~U4f2y74V8A<(6`A<kH-APJF~%_v
z@>@Y}gajtYe!;4@{4X%fbHYOtd*w>9GXnD42<_?baZQRC$qOWcQU|vZMYWMb4=M^+
zV>>aITZH(zYp$tSkJDI^d_lXfEylz1O6*@DRUXK;i7}srUKW5AR5D{r?A<_zorw=`
z5nF-@0WO8c*`*sMIPbZ8Edr;fj4qj;zO*!EJ4F%Bp&<b$8_#f2zDZfNac+m~b~F9!
z2Jcw8U{n9o4H{`e$ehYtr+^?n72+?<M=_cs1V}c*I%Xt5vN$e%+`e+!xNd}~j+5!x
z-)|dnEO&E^@DO>!8*lPF@FsjY@+CjHW0YGB@^Igh+_X;k4a<@EmDHjvac|qQEf#OH
ze;>jK=s3iFEuuuACpfmH8Qsz4JaHMiQ(PikC5$`0Jz=LleEF49|4Bi0yPgnblpu67
zgwe0BbT_?B{<J@nlO_kR;up3j$O&Hh!^^53yG6<{?80iAE=tA9$Kb{0wKw&iV0bkC
z_hW$F``ll8SZa5QSmEt<C8Ir%R`H#O44bGIKz{=63%~|)KwZke5NFsC1{knDxu5>P
z;--jCgtDHTFly_9P}j`V@BnjqcfqF>@3K6U8hTy6A{R#Sq*<USNsPd+$T29<sgJM{
zL|I8RZJrgpTr4LmYOaw>W_mo-^3?lF-F(-mz8b|OSyXU&4_sy{i|AiT1n~;e%PbGv
zw~0ALI2D%~Nu@L;Bm6Xd4d?Zydo{yy$V6<38+E&y3dFeji_w%qJX^L6zFC;LD;0it
zOGD)86ERzBU)}Oq?D!kWLoI_hX1O2RRm54=A(&|z;|yxmV3IDsBbVfVD4@CNgo(>f
zSq!^y(}S4L#;91SeVv+t<Et9){j|^BUl$O&(~M14c1tJ!Y%edUi~LM98V7v=d~r|v
zY)^Wg2ASrAGbHd5j~((Ceyiorpb;;C`PmHZEa+NN!ec|y3t+6G7rd)@AGA?X`_<`)
zeR#a^!;hS^US9BwJLxTCr5ts1JqO!jvJX!39)83rWHTa#Iycg#v7)slYsoAv)x?J*
zT|MZ(0I*&AiMGNM3vL!kM;?Oy&(KcE*agVr-%P!l%BbWM5Zv_9mg69c!Ve)?94QAM
zhcs7MO*s1!Q%h(FRY|(vcn4OWC`zlTX|M`jm$1ngrw;hf17vLhy>wV&w6PCvx+84U
zgO~hHI5sEb*5>vhGu9`@O-<|~9TyjVZKB4y;y**fLtc?47HA#Hy>1GKaosQACUaL}
zWP1PO_Er5wL>pXhCN){k8d@|!H8s51VS8&V{y{4DV1ee#P58iLXuT>OkcKSO5o+#)
ztNl8<5Stq(+{|%XK)F;L4d9V+`6C(}xo-S~ol&NXd5;TgPW6<gyppQ-YQK(^Z?3bj
zH6V+0$6c-G|B(Bqs0@c2$;=Wt8VM>6riD>uWR|8pt5Ro6#PgW4_(_MO74^DtWUbo}
zs;g@+Ynb0|09#_&U7|wS#WU6RPTfhmQ_o#zYodSeEMFXyIe}b>6X-rUb@JSlyC?>0
z9!B?s(r5gM*q4L%B`|}!f@le*?~>sg8S!5Q&MC6yPJZ5^I|@A4SW`l5&)3Fb_`5=?
zrUooJtZX6S-`@qhWN9Ft%L5430XPLng+<doiAVB7AqTQO>?Wzb(4xKu_VL0?G)ja>
zR<CdBk;Y+&_|&Yt_B20OmIQP?l9LHK-ud|1MJZ8}DH2%aO9;ruF-iH^cVIajiTdn8
zMRUXSBpUrVMusu0tuChHNa0DeBLN%|ay9Zn-V3ZKDIZBhFvfi}b<^Yib53mXc2N7$
zIy-`Nb#+4m`m2KF0_E4#P0~1)IFJws9FOT+w=j1vKy{c=_3W!UXil8AR8?f5lKe0c
zq#19M_;H;dSW?-jz5tVqeH#PSL5KE2KAvGRuD$|G?M3f66UKOv*n2$AvA70M##Hc!
z98-N;7^TCMuV&5RE&9yMR8gnHqY)yt|2L3RF9o@r8Jng`JDB6WqTRcBqU)6-)g+@h
z)PWDCGPoKI7eD3Esg90x7<34;!EdxEnzaMv&n~O)xc4D>+UL1JW3{VrVXcdFSRaCz
zAj~MEw<^yxHPG3F%xZhjSM}trNsJ@NB?Ic*RE2YA=JqdI<J>iYPyb<ernQlu$j;3<
ze~Ay%jq%fM`D64I9A3U9cra`-9DGE(y{iyfX>=lHeZ_-XU)R82M>+e`#y|4i7O{Ct
zDiK1D%-H}ivyc>U)%mDGMsC2%BsiNMeLZF^SUA502!A%`q6^mwz0JKL%r|-4E%zzF
zj`#6&dLH5?ep8-4C@N+|g%U)_F=n12(EiHKM=GMr$-zLa5zdKwxWV;0KN9Z$^?TL3
zQRV{xJ~@ue342*o5Ur~W1#=E!t}W-DA6*fM-ZhXtWKi?Blc-Pu5HwJdfa?c(twvWH
zwdSwbHy)@yXGY|E$T#DJ%(b$8jc*w?PX^aP_*9S?ZTavi*s&|e6Gf!tyJO$TZZZS*
z7(KB6)_z8FCleARigZ`T%O3?CE&dMaHES@#KHgd?!?!FNdDj$VScenhd{{~vV(UIb
z)MMpVlcgPT2e~HK-V4Il(ydn|MDrGh0%mU;7beQV>)^*Xr8qc57WIA4XV1uU8Nn|A
zx1gabfi+5ang0a{gU{Whd^$z71%rX=LOqYxun>q3Ru`3#_U_tb+e0n;NYi%4aX0Mi
zRm^T^%=n{PIzOwZGcXE6;G_{qP<|kC-`MZeo6_RZ(NzuyqIfgY9a+QHVpOB-S^WJ*
zeD7xx6&S4%YXu_+VL~-cdBG@u<w%JA!1-fG@|<xFw)Q@_hpzaGi~AE|S?L^9Q$!f?
zT26u;Nc+?~hnnIv1@~^`-|1fft&xt%wqT5Q2h^`fA(P)5xQn?Oa2K^|C(m=rd7r7>
z<17eV+%#0)t>YvWkCg3<W|VxM6p;2Q9yu1+a`!_cNi3cJxtQv!Kq&m@fan}>jAFT>
z;TB-E#bf%QcC>VpJ@VGt$Q?DMNVsqbor3#&SJq+B_{`$f8HTyg>i)tZhCy@0j;di;
z*ZAYycPGba1atv)18>iBmZKA`CsD(=F2%)FE%p(-(Lm9s4x+82gK1^=`Z%j{Gd8n3
zjmtbjHKZ?`q)vT};0$1!@nD4+_>6*TnEUEOoTc;8Y^`M-w+=q3ef`&gy8~IaKbx^u
zRhtRUE}EPL908li>F*ai96?<;20Zu;dD;Z|e(OA9N|7DwMaLG67jtkhwSj%Y!;Iua
zY(jR1*d(Gzu5i2JkGT8QZ%M?1s;2J|x@kv5YX2eaN+bS5+QC7O(z)a4Tc#5aM+9T9
z2D`#d9=QvI=eeVs{!`kRek}=>5emXha_9Jms;JNi&ARN>?upj7W6XX(lBq1L_0Udl
zJ39fl0-MEh`EvNFF+K)<{9Zd<Qj$ni;|`Pc-4G1j(5uTj*ort*0XhL_P1MM1t0mvr
zHeN<SqiRr6QCwXQx?<8;h!_U;6GDq!YJXU*H=2o<@m8VP+{xnbJgTI8u5U1U(j0QJ
zR*CbI`EGC5NzV&_@UFdn^+eE4DQ3PlR{{ywOW}f%n7tV(S(T=jrlhenFpfz*;|5;H
zLwGkljW;A)=}PMwhHqJS%0Ba-${s=%wbvidJ?2WFJYjf$A4R^l<g2Wc*#3d-)<dN;
z7hNMH669J4USEk*ld{Y5GQ;!w7DeNc%1-GM!C$UlyQ@eS5lLnw0Y!tDe}&a$_6ms&
zs1EKROj0-03IzP(yT2McPjex@6)AN(j8`HvUn<+=7&qVV*$D9lD($=bKa09IbhQ({
z0L&>4l7D6#OqAn}LF*Ns#ohn5F*OkX{@Kj-Ez`m*ug)Pppx$&Ef({j<CHw%QlkO>J
z-#3`WGFlQ7;R+C^{t2!22%th+Xe&;np*N$nBqT6aw|{kmkLwaP`g0JQ^A>6rP^CsG
zdn2pN&JrdWXS6-*$J%_M9)p+z+OL{Xq`^|GM5{N;$6CXTQg-5N@#5^&YAHe#CfL5A
zyal*WP3C$j4ibDcD>sywHcpBV)YaI`z#XVq^FC&Gmd{x3b&pwDT7GmU4YPRVz(zCL
zKRLlc7lC`fCl_tJU}Jq|8yOygD~<J-x|KY6@h7@0Q;@aXq)xyv<5sH>CVu{IEa;<!
z7*pM^59l?=m8SQJk)z)J7~r=Dnty1Tl?t@GM}mi~F7K>)Gt8CZit|VU&2)Lw?Q)lc
zDUsjq(pe+TeBFaWFmANyX3`f(dHO5JCmo*4ez2tWBap1X%bT<7qCbQA3*m^DKkwk8
zZ8J@fq(85zepfv$DU&yVBuj8#uIN!kIc-P35c8S#l9sx=y9#_cfUVIMk$GD#o>k2A
zY`t`RoDgMwxyKL{&fKERbG>YlG)<Ul&+CqHR>!St0pB>2zv&7s#c4rw_kuj`wjS~Y
zB=cD9EGMv3$>%;FJC`T3?*2-hNkZJvLJPgSE|aG2TTr(ZX4$$fHIS8ch?<}M6Bl3q
zefkXCugYEF2m5h*5Baak$#8A)ljJ?xkheVCm$mb`P;2%8Exxv?`D<<a3onp(!;Yf%
zlDM0tK}>-PnWI2<lh$uHhI|$Y=B^u(>}Hy8%Bmje^fAAiLHSGa_3gf<wdp(tz%%kO
zfvu{?@jmV$JT%P<4~v!*n${(=?GQO-OOs|RaG#n&bzfn3P2czc!Q$5WK18bEePA)L
z#RV;N+lv;Jz9VX_;N85jbcyRJx&C>7R@7XiD0bNx8IC4qW3Rnen_QXEYP6G<C!{9g
z>9EfE^m`vq=Sh$8s!wxSgVLRc!(EuerZ>ue=tVe!`X72RMS+ic0C--aAS2N!a3h0<
zz0ubOt^_u?ujKT~)ItltPi7jg#}&|3^|~uYzXAde6cv>7(PT^;3)eud*CjFlYj<Tn
zTgcx?+*h4P14k;m0Ic=Ra?62!Zh&JCbAkRhfJxk58@}Di9j8<Xjk*i3mDqUqs;A*L
z)h<c;jR=S1sBdfug1`3jNsgv;A4|iEK>ExSrQH{kBZf2zJJ=cQCmp5`_pH^})7m3?
zS(^5qR0<5P(#pc<o{;5m1+VKpqF1ot!Y=+niKG9ZMD71Vi7g>gd;CgRCK5Vt9=b`y
zFWkLVsw4cB_&M)>yx%0)(G%C+Iylh%ttQ@0W#Sea(Aop!4t+b-wC)|(zA@yI4q-zs
z&PbfyS^aGVq?E;1f@w7Bjc>JI71a(Y<jHL#ek62%!;LnstiH-rmDvR_$cz(w>MfVZ
zL(u1P2Uy@ixuUPMTcQ>T5MW;L2S_wuewqW*LNcD&bAV`_;-=6^2g6ES7EfdWxJ!#8
zm1AaL<0RS~|MqpOAx2P%$DCQga@Uvpn99!V#n(FN4nHQ<2dedzaeB)}+`lwjDFIWF
zQa|oHrO}9lQDB`CP&9Gp(7;t}YYNU<^ZTU~^>dq;jBE8mta9JBp4Rk^)S@ZMw$Gw`
z9sr~ED{}X0AfsLMNfG4gbg^S=;y&Ww^7~IbC*S}0(BnyP(gh%y?c2lKq?~s%6Jubf
z_}X8yr#uX&`a5d&zdn#p+scB%l$ys!(gvTyjWJVyM%9EqJ$fXE<J5L>lgOm_E=QN<
zsq|HT@8rm_oTSK9H8%c~ql{3=)=wIt>sCe|x|$qsS&lW~c~mcA^|NR&ny=`T&TrcB
z$mODSHtO%Mw|_2u>>BfLx$k(uVJ_pVDZ!x<L%ZavwjU?~kd}RiQ5^iCZVH<a^Dpv-
zz}?sVF93!Aiku1IB4>C<TI3xVDld+RBBFwHUc5?CzqFB(q^=K5ynE<zxjibVcq}AC
zcIp;>oa5RW5)mM2Yx-56i5|NI)inqQp;+mHn-g;Qp=Izqt^nVG0b#c-DyYxNoFA~}
zT3(_I$%?>Cpb9<zRIs7G$r12ZBCOVS|ET}wx^Jq^d6h^WMqIy+$Q5(Sl@1weXcI3r
z16%)gG*;XYIEtEs=lpa!OUe@|np+W*;`_t3&4k?%CIG%P-TaKo*Cg2wjdY#QbmfN0
z8=jaztz|up!3d%(g+Z_`Iw46;l+GXMqZ&4YW)4g4^HjveSpZsa?F2n;#K|Xp8EkN<
z6HA8S(m0G`V`9k8(79S{45b^-i0FHi@7XDi`J>$u;JR<NwPsT-P}SMWdVqzQ^nkkc
z!N<`qeBJ4WR4N&t;gn?*q%e0c++F{NX?#0;m!0hdF0D-u0uquQSB|(v1Y2=bARfIo
zw(zozXG*BGT2Pe}!ugbt1;ugi9OoFSz%f5Rd>`}~o0zRT*uAL2q467OFN`=;ihoHu
z;0kEh^%s_TtM3ZOC6w%L72Czoub9{d$S+_O1c*OQ3P|!eV;)qpE>IG^ufD;vvVq<O
zzFc=Y>P#!|8BOR4{LZ}|rc1@SowhGn%X1*;@85P0SrJN&8*kbTuZ9r0g$Sv?@q$zF
zN2>Uz4R+2Ci&alWyEVZDf;Jp8f0veoDW|F)41Y<(mA@QPnDS1P-SKWFeMvw8kgSDi
zmp5V41xlxV?`wfC-E-Urhed~LnP*zOa(;%s@m9_<Bqhd}I}uN(yg0^t%PJ?PJ*vr`
zV8a0AR|%HR=Zw>W^B&4ldC!abIkV!zpp*C%gZTQ1QqI|fddk;#;za_jF)_<Ax;C47
zZ}KyHSH}&ApqHk6&p~y@<{#TubN5CfgU2KiP9rXgk;>lf7asmq`V-ucKWtTDn1OuE
z(^q&3rk^vlv9t2@lQ#25wmfGZx<Sjcqj$Dins8SXbz;a{%dp-da_oZ`@P$m2M5jf_
z*Q5;V6bu6!>5J)9@rni1L~n5IA@9o(=QKaa)?_0a0VL8Oq<K!934cY<c($6#F)Gbc
zM3<+N`V#8CVur8n-IOEg)JSm5mv^e*g-fp65ILPhg<I5{VKT}cscWtzES?YtZ-Ntz
zoIo@aaf!R-;nphQW~63x6t)+YT{LG=s+J9+1;btBz6N00!3If2V~N%^MyCg0R5KAA
zX^(>Ps^<NQvSdUVthEg7YzSWx=g6n~kG$BL^abGlKRsSeR>;~&AlHI_xVYh&iW4y}
zgAfi)Wi=Ds7hclyqU)hxir)ixoSkdsXmuKmKSZkNjT`#&Dp$M<CgUXRpC<T&2OrtS
ztjqFhld_XAs^J?U1~dE1&(x;2ny0oubIE;-53SIC^a7OxkgFT1iPHX=&>pngF<A@p
zl;jKoiX58MI>d*PD8m~wNLQP)<Yw6yQS64QO#TxmTJepLE5uf%m{_v(IBV*6$sn=U
z4pK?g39}1hLQjx1)wU?3nG+2iBG1$;WUs9QT}#0c%Y%ZCpC^b6A#!)34hA`=OezoQ
zr-FQ&c`)=BzybaBFk<eyj~r!)=n5g9tl){R==&@;^kFrJtQUZZ7+hX*!p~H(tSd~(
zrATP<8;h{HI^!HXTA$HeK1L{Ye3*#m5&okWr;Q#fCTt>Id{t?VeJ?a<l;@Pz(R{zQ
zLqFU>zWOSF|L)!Gau;AdLP6r)&3c6SH+;Jmp>i4@0O(*O)zB<|3rKa5X<+LyAv+2{
zBFO&z>ZHGrkzvv`nTzOSrCHsqZK_xs(vtcZT;5mXf5?T=IWfwi4*8dQ%eQ(ebAEil
z!@c^lOnUzYzsn4K`8Eg!-}{l7d}${&pXk}nc6!O>8S`I&^hj8FVxYc+Q4)YC$qi8H
zsZBCm9pth{v_=R7l}rdw8m}x%BM3=)XCe)z_txyRG&<p(KJQ8(BLr;KaoR@v2Lbj7
zi148_hE4SroRS7K;IBOqXy7tK4!V`JHzCr(T5@e}Wyz94KB2hbu9VlCr9*hUdw@9(
zbJ@#2-}KG8ji@NwPjvGn(>aVgi#^ia>eKbiD8jkJhGDy+R;LbrUrVM^BYT7mS4%jC
znL`P}39kHQceKkOE``!fa&dUL5wX@bV|510T5i<Ayl<nPEleJFfc0zd(5Uja)b04A
zo6IB`BqQvG-Y@tPvxMwlV=g&eu|NHViL|o@5O$ePEkkI#1fw#9RUtD>i8++cz2-$N
ze7M{z#HC#F5?EYzzp^bi#wf!(J|@puT3Y_j5n2{`k{oh(+B?#i)D||Q#2NyA?Mf?b
z@oGAxu_k#{AXlSJS%no-8(r9KO8BVS#}H_a+sY+iFM$;J5d2`@S46Od+H{6P`D<%F
z-oymLsvXZmwQZ^P{X51g9p`~C!YHbyoY?Jkx$Nx;yG<~TV_~%@I?9X|m?qe|6C{h?
z*XCfLl1iEkYE6I{e0b(f*mt*QoW;(ebRx;FhT8eBw_IWiUOv14>VIphUWwabe8c)p
zsYq?)wBK>f5wRuK=U<0%?AhM&@LPvt?KE$Hj0=t)c;~>VVL$x4G$D(gm<8HGV91-u
z>!+?^$XNUOH(+*kpu%N(5?r$|)=*}4kG$2H40*170ZfF+&eDtK_Bs+Y#6ss`sQ+|&
zHT%?AQ0hl=lZ>;O_8z8@#M{1sR`Fx|rt1-_U#_KQZ~O!aD9U;`?>ompNAxl_XGFDY
zLQB$T8p%JsJ!RS)_GzcMJTv}gru<(_ePvi1Yu7H;QbCG)LyHyn;!vP?A-D#I;x5Gl
zrFd|6cPs8z+>5&hclYcw`~BW?&i8Y!%*>U^OlGZh+r#t=_e(z?FF|k3XF+hUHsus+
zh9>?i@}~ij?Nn&cML9G`m=-Y%Fi15lCcWuPUU`7WozK(9n=$)bEk~%7K=C-j?fwhk
zTDb@z`8r~({ssGuf^6ouIcrdDKyq3lW3zBZcnc%>gG-r^Mi+vjgPD0X*0wq}<{Ysf
z*|aEi2+;`gp<+s)w<%t6=fMeYHzsHD{c(kUsDtO$Ajg=@EdT*va|kg&mkAVpYI`Hk
z7TYCE5TYkqP{V+cevA7KiZS!KqiWJ7Uq_Sq$VQ1r23`b-SODr98RUGsrq8f;g~7j5
z4_UZu1>vjl*6D|5$+td9GX!(kt;X6vNEKF^%5m+qSQge6mKpR-tuF;^{$_7y^1AL`
z5%~7h;L1>6$dpF+o4s0bKBa<UD3O~y5Q_Z8Q)s$`#||aTYv(~-aY*EwcIiYo$&F*c
z+qw)4jN-IQ^irTC;)FLy2OcS6pVP3|RfbZLx(1HCEjf5tr1WJC4YS+Dz?QJdhQdN^
zSu|mijMrbI)7YM^%Kl!ne_Gmk(Qmxi_>i!=DQEF3Iz!4%dn^w>S}8vH;CD0Ipi%$Q
zlVwHu4}$V43x_Fqu!}LmbB@ofd`14<tG$d3_rh=CEV{!2@5}nB=ST}SulaLJ*hj2s
z;)A{q^k0@s>sHk6-$uz`v}Rf|Giy5<M|@8BelHffiui*Von|VdSjg}W7*Nh-n!lGh
z;62m`)hNhJc^U^(P7!fG5B~ew?w%?yST4S4P$lkE<m_*A{;SiEn}Bp;Th{L&5^fdw
zCQn5mc)SWk4+`B92`+NMWWRd+o*YIzZ8n|4Fi|xAo-;(%o&%ep?=DfwoGw<;4_}Ax
zMJMkfe0|*mZ`38>bjSB_C?no~Gu#Ld)uMje#IbwKHrw72>2`cub7MK~iotbz8qRsx
z`?hPm96NyPj(NMuTJl$<_El>AF0X_3gIiyis|avw|4Mv$;F{2CE!)UbO81}Y;1*6F
z3wK5rf&POeN-!7_6VGjXcqz2(#~u3L(u+qvv%+v*#{H>q@O65Hkm{KwvW5L~yj05@
zx2YXegjQzm+}E<1<gF4lo~pe}b93~h@IVSo-MU$2wH5v&Yh!y6W+#5{u2{LR(H)+o
zzf9@qB9KchdMG{SXau^&XR-qWWHn-@4+s)O;|#h%Zse23=&_K}Z)c-ql1D)ls+*PF
z<=^tk2C6F@G6EZRz)tk*g`j}tWvbO2!*R$o&H-Ue^yA;4$7+X%>3cL2gUTYf%HQoc
zQ=$@50vYPq*-!_=j5y^g9)X|q?4jOVb6R~yZj}g$(vCZHL||RlURCX4?W?5aAI5Z`
zKUVGM%Dde}g_H&#9-a;%Qh_1wDeFFnd!NSL-h&CYjs8JeHN1ElZyxA?%K$KK13<2L
zVCLtkKlseg5~ZaL^4^C6LJpy-Ub7adT@Lq9-N=!kQ_tvea_IG;<;e%k{Lw!-93^l=
z#CIe!5d{(1o`B66L)Vr7R18DkeyR$vt>>JlRBxV{{P#r8V5K3!Oh<LfmtE`R-ct{1
z8a1u8Obs~hp>49T=o6{J#&WZ&)KTYf6n+%PF}AxWkC~!;om8-ptP~e7^CiKm0L-|Y
zyVz@#IBFn3%?~4KBR~_k%p1#cWu-3uo^#0vrQ0DMzXF_1LA={$wA|%jJIR>sq98c$
zV|W-NfZ>#pam|sJ(HC8mv3-+H;&!M?_fdSYw5-}@nam>thAB$Z{b$!<XqQA#orOau
zrIg4-pF<F@S2<>x$`q_E>J|QXGmTEt+V*AC-_U%^flpN@URxB6Mcq_vss@@Fnc5kv
zR31Ao^a$FWVBu}Z<4s!!mK%gCy>uqd2@l@g`>r*~l2f(BeNGMLPyDK9`+rE_89jj^
zaivr&J4$~sqW6`1i(~~GTLS}PeYFB7B3fa)#D~3FurZb6cbM6M>$^>e8^-Lk#P2FA
zZQ;{SVkB9Q)!DqALa(p;L`dO+s)!$<EO{LF0j>2+FF9OU_cIuCXCww2%*-gOqbWho
zyRguW_Z9_FZT$}u7Y)-6&x~RCSD}}Hf1qFc6p=oB1rSFs-NGJ)x1EOzQ6|4XPqA;(
zi9FJaMDNZvcaVba@>qkQ0OGaRTS+sv@sk7Fa(}5;#O^F$HPH*cl2aJ_;UA=g5(+nw
zW8{0kWAy`{110a|UcW-bilp3q?tO0S85y*f!iXPd2fEi+ksv*}`7%&6&d?uH=r>>a
zt4tb*r^tL@FD(i8LkivR{s-xeaPao1$XY2hR6mtZAXTHb(aM59YY68_4iGjW^kplE
zv$<z26F_!#BVG`xOGzn}V@NicA8*$fu?Of3D4Qj_WToT|9?Nb?#{VEyDh_N_LJd`E
zTP~<gY+v0L=_sMMi;!Nx%f{k(lwdMsFDaxHc3NY5KI<D#Yt4xDJtH0K<q9oUrx`Wr
z>I%+b*J+{-81@Mr%Hq|=;BPtGt7efN+IX?Kc+WPg{kqk-SJzx8ro<btPR}JLPeT!o
z-$M}J;o%ZAoilI5cFZ4>q3&I{JQ0DkPc6qrI?uy-WTv^pCd>*JAbOC@ds7rbN{>v@
z69>JzEHB(?x;oJc{eDBj<5N3xw3r6F<nbZ)BeHT&;pE)?MVinB3s*(0CDx~!0F<BJ
z;Nn|u^Ax0=Cy{kKEH?wj{S*kJV=l-t@wNt}($)kDWc0lo+{kZg_SbA^kZO*>rVlfV
z38GqZo$^)i(5P%NYKkQNO82)Rifk}m%kQL+0h;twV)}cl>0+hR6v|XEiyD7YP+WB+
zg3I{VOEiZj7YwyCBN$E^U}QLf;P}%{+@?#}rr&S2UTyJF2q=d&lQ)ow=I0be)3eKk
zvVS2gWAPd1MLz*eHqW~b6Oo@oL<w55_0w0s2J!A+m6eDc>`WJ$85f`J<P(OM3k)JB
zj>7Ux=)X&$Q~F&ElruhSu?otYc^%q^ko9SqLZVRN>nLtF0IcDv%|-`qg^`2uErvFA
z4$UBj_0e}}a}@4%@~qsz>>)*P{eS5`Z`QD@;I6mSmm;@D^^Q<Q^h3x=L$Us0!l;>@
z?W}8LPmFZuP<NvuFXUM!ADmc3R`TWTZRh*+L#<7daZDBI4eoIBB}wNMzR*1J;TT2t
zt&(EI_om;U9VM|1bum^++O<fChUbYh*<dE$3mX11GY6&rpi7Og_W_2h!`bL`ek(DE
zDXpQ!;<v@P)Cl7SkcZ0Do{+q=$X>IvA%TORFFI{b>rAy^>K8)xz9ZuciU75gKPFSg
z)1>C_`Apc4wRre{lL!P^j>(UEM-1`mSbdl}U}dOg0ia0K7^wn_*?h_L&qzqN>)wp?
zsQ|MR;auvExJCSAd~@&@u(S$?(f*>+Q%7j|NZ=lTuTY|z#IVc1fW;M9W6%}(HRL1N
zQ%U85+#MG9J)CsRo;4rTvBLeGXt6aT_ayAhZjJI!oF{$uwRaI(bx1MD*Ia<4m};Ro
z)JN@MiaC~dRN~SvsW9d>x>R@-aJj`g<&rDBg--MmGFq_`6_&pX2Fq<KG@-+yA0nw%
z9A-m(VV9GkrD+{%L7A(blJu{3(nHyGgT|Bem_X;>R{uvq$@#|)!-uZM3U)8<bHoDm
zYtgXUoB(jD-rXkv6BXB9e-ilz>BBJejtKG7YM@WR$mm&w^OS?YEAd@N@4x!Wzv+mT
zpGCyT{&+GG>oBaXh_v+?;vYK1iW2dTOa542=y)j~RFaV?&?n2=S&!AU#usyR8_~(8
zorUknL7j8fmQEnhr}+_%9eT9Aa@+*(?vjZ94E8jK(62E%a(<rpJA9}%DcrnN&+Tr#
z<~mFH8-~<hbMQKTh#_^N@B3Q5P5&%Mgn^v$F{_40joO*_$^AjSQjBYUrcT&t1m&a5
z$L%m_a)Zn0<!O!2lcG(5JM{kK6MgvSYa$8^`SFj5n(+7<lIPj&7X$l|4tH~+k+nVL
z4IjX3bj+Dq2p5^<@Z^EP{KXXrCp$Z*SD*A9-2pb*3}ZN!8!Xc-g<eA-K2NN$aI>S_
z71R8iFK1CdlJ%P3wa_8Ij=1>}-m4hcPlveOb|H(LxJ>bwMCm4L-boXC-l%&s5+M(>
zG*yedVVcReR`lN)d`#G*$G0M756+ns&?<WRy5niSg=Dk@iM{mpP>+v=tbD1Y?qHF|
zBS6mA8;$5EzR4MAP|8ICy<J^7N>EIA=ijo!FgosqCBnZ@R}(r$7o_lIQdUh{M!_NC
z867lSXC-C*!LVBKB^D|#-|ZDT0Yg7h*lqGXG|a7kS9)}?MnqvUg*R#K)qD&%E_WO0
zYRp|kwuxn!%iXD-n?%qAf#2<5S%#((<M{$^;)&qK`~w;O1#!1VrgP;nxxlU$5uC)l
z<)}2PZ3@YJukJ|7hko$FeXQ9yOew8Ta-<l|nGdCna93`=L}r!!r|VVM0PuQ;O<U(S
zo`0Q<@Kc(yMYmhIM%49(>0_4&9mRsnpmR+=fhdhsT)mDLPq}ak`7d9iWoJiwLVL-U
z&|j9994n%WYR#(qvk%wxPX3Fla!z?+mTT2V+ZAxNuoyE3`0#9p+=G4E_yLAKRpIs4
zy~eLaM$S)qGc!5|izAA^<nHVJ`s}5jq(|+<-{mUM-|*GQVs2U1JxxB1Yf6;~Id+?5
z+5|X;_mp&uKivX43yaNT?OCDNUV;e<iZ1mh*E1-?Z!fE&p>WW=_08`t3?PE%Xr^P9
zzSbS_$5hB@S~`r));|H24=_#Pk>dH$(@hvULgTH%7V-sb@$aShG&@1Nm!+;6gxOPG
z%1cZ4ey`b0(|#;E`TEoiswCu9;<<J8diJ5IJ1xupURqopXMCPk?Gg#1ZNa44FkXF;
zsGrn}^}{=K;Wzm4^cj6uHzC$LN2il;$eap;O@`O?A$GQ2-Nkgd?9GFJ%53_pe%+Bm
zp6U-%wQV9&b%cxFd7uCOgXGHH2QC(p0m|C%B2vQx`aq>a-Irf0X&LZnJ*RQUL(6Ys
z<mF<-s~pxo)^MBm_Qn6eL44A=A^{DRh14kVtz1M$xjh{~q9z5K;y66t`(#}2>M@@!
z{En|IS0Su`y5%CYs!p#&@;w;ol>yf4H3RgGehtk`OW%GQdz~K%8c2IkE<T0Z+0AwF
zbJ|?veg{S?GZTuIO$Ge>F38#-(-+C%E@cvg)k>KQnh#z(g^WONJf;?V#^W=EZMc#u
zv&lBbe-Rs)2e<D#umMK<gZTbG(6`GhA9clMT8K{?5D^6_rSG=Qei`M}MRmL2?dD=U
zGfs1}`V)Q1ze+bEc?_=n$j<Y8(-O4v!k&LEur3l?8c%I%ryj#T<{c@|KS(ZpstEUz
zW}_JjDlyagE(!@6KPdmJHGsO25MCHSxCo_{3TKk}^>wMOz^wH+1NB6&xOa^-TI{&I
zStKexf*)f6^@Y|91H!C0KgA`)^|s%#(6ESkB|6QQTBR;=qT^3ZoxF?<-p>6Fw24^A
zdpxVSrY(|yb|tDgAuGB<H(0N`1tbcYnSnRhT&7p~|3U2s#`c+G5%F2CucHoT)^l%L
zjOE&t-d*>}>|a%M08|WDD4EiXvWVuf(JJ(`p!1nAhplP~+f+_V2+WUCp#!$&I(+o>
zm1{25i4s2Y{RfFTZ>LRJBw!Jjb^LSwhilFfgdaox+Mm%6Gwq|^-l#X1pTBh^;3Mk1
z%It___T2H4&_C8Pb-?ie5hKdK<DjKmP*-zg(DWz=^^JRHlfQtZhb?;iliO|Cp&IJ9
zMkT}?!e7&f>*uw8@-ci;xso<>0Z^D4;aYezh~ufqKCJ~fzgO~<NgZwtj_t-RJg<P^
zHmN{bsfJ+#Zm&eN*Xmn{5*#3?!x(H=hO_y%{N{sRG$DZoBH9(go6nY$8|#*(Fe%`>
zWGyEOBJkf|jf00WGw5SKw0+9jS`0PGm<yG4Q<g@;1si^i>9<Tx_OsK=@)nYJ&HQBS
z*5uXkM(^ZtgsSu8<@HnPcq>0=#S->y#}%HgMra*j<K#(}n$gS2J3+b;_Qr=ya9@)h
zCLqrAnfosu$=-2_bMIreY67~T>47e-0T~1_ZI(hiB>n*y=op2$sM9`bcBX2L3fI-H
zGa&jVk13V<2H*ZRwQoPgV$6!h$x)-zeAM!viw*e);VTj!7A`-<#_mT+?L$)6{ZmF|
zMl-73))>)4T@ed=Q2rTpJ8>QvG7ZAXF8eu@*EWAMuO<Afjd=%npS?@-CPQM_?*2G$
zWeL@&<wjFavgkM)biYM?r*ipJxFZxUlQm4vuW(jQiv-9$3*0-O4htBo!nk&o81Nlh
zsL&%gkA#HlGPssxNNzs*KD)*^T$Ajp8#A*Q2UK@;xNk?snN<n5FBAv(zo5yqhVjie
z^%sqM)cYIbT0yCUhrVia|5(2J({NUah8ml}zwR+xB9{1hK-w<J%tF8A?tMJ9D_bvR
z{KW<93TtHC7P$xB@^$GOHeuVs3f`v!(HhdtS!ok>6Es(9IO;P4AJ)rGELHn%7^piV
zzJ|-+Lb0F1k(VIRV;C+_|AS#$MsT-MR)f@3%~Y%_E=CBQtJp;(DVqLI)uCS}z}MES
z+LSG65(t5<Z$~u)H<*uNw0@vS-EznXA)&^22gsRnnGfBcMyw%9F2jxXwA`w#;=dfQ
z`;t`-)?OYt-D~IwEhm~2keW*sk&Jnom{DJKKHU|ae3b5*_o{z`JUE7KxUZ?6cR*wI
zy-wib$f#>Nt`N)gQ)8W{`O`R0<6Gfep?NZ|*_DP0-s{3dGWLc{$m}R0fcdy|b)LP^
z)Tp|L&xNX8;3wbpp%(inBKZ8XI1B}Qw#jm$Bxfmc-IEx@6-6_}A@KcpvenqtoTHph
zm(47YNp;2qmHn&ht!jLf2Au;V>NKj`BNk~imK(<@9cG(7(LGzZo8Jvpu`^T*h_#C}
zdxi#276DS>b#)#x-w4_~v!IrpA710S-|#k(pxaZRn<*fWPU?yJjU;B2$bU9;rSsVX
z_oT1i*LpH!j@|(UO?Ma=LII4P&W!H46?r*fr2K#e=seZ3H}|#C0<AvjanU4NEH3cE
zGlH^?wPU}CQWHjIZg6X>xBQ|=v|T`uykmQ2Glv(o&#jA8m;zy?z`rN9`uDju!!fCk
z(9<cRFB2QK+hG4!oarmnaXoTh5z|qvNmE_uWCTB6A@7WRoO{i@9it^<M`~su^o7I+
zBQH+&Be1pSk9~Q`LKy_I3Q1y@=bipjdJ<tKeg$PP(}R0@N4qUM0HaKAZMJOfOd|6g
zJ4HSxl;0tSv_z_%!-W?Xl9!p-Biq+lc#JEMsbiPQ<*YW`1=+Lo@_3541$yUi?LNqP
zsv>B~9p?woNvL54YOK~PO@Ql@CuC?EKZ8tTDnruBOBJ{yY|y6^pz70$Y*=7FphDq5
zUZ0_cO*@6f5A~Y{LMQ!&K|2n?M9$(bUG4@&Jq8ZluD3hYb)XPs;quhCG&bBONG8>z
zpT<|wyq9l?g`hMvI`<t&5p>L{wQLOlJOtMQ5db>zBvynx^ag+9ToZ*e#|*LB@%ZNG
zh?Sct%q!tS)ON7>b5pdop2y2CSiINPk81PRG|@6&Z4TVg47{c4aBd<Mpq+zTj%yES
zfY%uA*3(XhD?NV0WNYNG^N{wSR^(;!d+A=T&>Nf%e0z$DAy2g+#iYWwtsCB)<JZNx
zhw^_aFA>PsM-2!+D3CK~6B)koTdc}L{*pXx`Fw85j=%~*$nz}LaRE&JRTVo+Nj|@(
z!of7)ohQ`81-oaUM8Ge2!FQT#*Z-QGBN2UR!qmnHLy>#n9?j3$@=u4U4XOZmKhndE
z&VQp5iM{0dpZ<tkm;q<Zi{1R{)J2WA=P-=nZyhbeYvQn{T7c<+GVQCz-~!D&BbvJy
z|L?U)jMOvQ?|!THWYl^oUzIwdYIpM*Dc~O@liei>hOow;JTYRLSO6Ha=Uca<k1t3W
zNFpd>_zsu;C4w8|=8U^FZBI7sG(UBNmkPOf!nD{WmVE=p4%&{D*H3PnsW;2ewo+Oo
zD-Z1-d1H*hhKh0hguSm@8Q}GbHHWml@ef=UmHO3VB6@ao+&@TLS<l{#qfu0{KeBw-
ze}lIhrcI22X7Bhp{Zqm$;!-Z|%0ej@9kD3U^$$|JRsN(#vmlMO3=l;~C7mO^sV4o`
z5C{K-(jL;fT8oZVa<$Q~>WDIFrTdigR51BnE*K^!SihKF5}iz*mzkQ4A%de9y<SPN
zpYf>~$WjtHY8b?E35Rp!79IZH;)B~uP39Nk2`ukde9V{;O+mrGGH?`OSJmxyE}ffe
zVJ7@TION}~t`ep5m^)88s>akMrDxHEBK|#sFUJio;kF{%%r;=)Mj0u4o^B;eN^_EL
zl6;|*oI@A;yNK#y3^0NkIr5%Pw$}6$y>=)+c)=dLb#Zt7)*ld2`0sRZu5I+qGRQr~
zK1C3tsJ_1@?bB97hfG&F`U@3n_2ATGygt@Ws^n<9>QWSIL#Liuc@2+HZ*VcF?%(-D
zHP!Jqw9jZ#CU($xu|<eTo?Xs{sel73B?2Givpb;M_oS;J5(zGi&qa-0s?(Ck6YC^@
z9~2s@RHyj^Q^5xt{U<M1O!@@j>1vHsollWv;6-=5!LN08bWo;jwEO;eBaj)w|6E=f
zWHM#Tnlh^-z-3bF_ojLso+b2%6S~wiSu5|&!eXOG&}5%)(tyClqhsLr&u|+PEzgya
z(d7(vYk1AgyT5$d9p#~<B99gYrsnvdg%etx(HA<ppFNp=EmaL!>9-D0Nf5;Qn|Ves
zChsJiZGX1Z!QeXhnS<&^Umc-_d;idL^ublmUbd}Mu%V>z+H#+%w(;0N;-bgl(>)bA
z=<-x|_Qb1r<ZzbJYD1t3-{j*FQ_r>0VOmChn^nx&09_AZ<3C8&z#|u~`wtQ<4f==G
ztt1!KU@Pl>E_B1|V@oU8ygdi}OgIDIa_Q*?{3So4T1=9H5J1Qq#XW(Z%K*0kUM<E|
z5k;w_R|#i<SYEW|?cmM_G<En^1yZl^%5AX((G%b1OG2I-j*&sfXj^5GV64|7eib!h
ziARf;l7DlGhzF?+oZ--L_xRGb`n)Mg*HifjDTn>5N|{M=8I*d+c_ze%U~j@IZYO0X
z9mA<6@>++87cFytUM)QfW^z8BMUc2_qxvQkf)l>mA^I?VlPK>LqiRgDHVArldS;Y_
zEq-VZH9CaDb0UhM!D)^{LI(|trc7#ELzj=8%P72kLRONSj$y;BTae?MysGq&K%Y+6
zvO*dHSFY3vLb^0tKIW@2hSUM~_id6+_mAq3a2GaeX?zbgk{8q8M5C@xJm#26b4_JZ
zFfAuXqt?In6Zi8{_?Av;^*feSZ;S0__fxRntK(V{xjsE_ET>f2l?_alwiqL!5M{ql
zDkP2e^#2v^GI`vttU}|#w;n0l$0(wviO1giF>gr$K8^;`*ZWyfQHZ}CojUlzk#p?S
z@Dy@Wh&39H@|>%fqV#^?AyCM7OabuW=@g_ibJn!y+r8bty={&gECge)zSjoxrtzaj
z_>x~(<(vlwM%`JS+}P3VMt&!j7#J=RVc%^N=thuF*HU;4-5mW%xTLQ)pOhcT?<Z^Q
zSLDaj(G!BWF)H!`Vj<`QyA^4(a$>Fk$FajSv==l=qWAJ_HTuWK660gotHLImUFRl)
zSFWhr`|}z)-Uf-T#~8lr5!km5Qv|VRkBF};0*J+b5#_3!fkN>+rK_*sZOUBi&gCx6
z64rDEO&!m9@8fPxge1&T3*)N$dXWTVK;<6jA<lUzT_=(=F)vmcqjK%+WhzB#HR+~*
zDGrjin+X`(_fzdF(F`)X(&Fv}4ARXoO(_L-sd&M<Nag2Wfxmczzdd|!mgnq#X?3@V
zK$$H~aKXMk2WICwQ$4pg4_m6F0clw}0@M*i$pjZX1ZcjJlI~QE6+@>zNOBBXwaUY!
zCrO#n?;cDtzzQ!6l^YmZC=^t4irZ7t>J!)Lj|J83SoB^o+2p%tt2wTa!AYnJu!{vr
zkJow#qX+hPK78TOjd<ZOm(tW2N|)hTY-W7v?}zL8X&j_?9qJ-&OXO7}N*X1`AyXU0
zpSBx&RVnP&<m7W~&ex<?lRAIAdoCRRV*c`*o#O;WMCZ$eC@k!0zk79uX%;(^xA=-2
zrT5YO_CF=W_;F<@7*Y6)(!I{99VH#Me^dHrLeKJb=6PCuoTLxCeG7kjI`waB$`NQi
z{mRC-a7*lI4mb@uEmP-~*;RVwY&<}Xe%(Kuk|}mJ7bGZ@8?L@1eS*?M9kZi0*1b&2
z9^uwSGU^2Od^_-!I6BP6Z21+$UW%eW|Le`LxA&U^4P4PeGbY93FJH!X`L?Dgf0Uoy
zB{vLnOsh57nJCXb8J8Z+S)V~h%`A!U6?<tX(T#c>M<3oSPjCyF=}b}ux>fw@Fu%ac
z@=ajJiQzq6a@fBv<yT!re{@`26trWv?q!;2_D76uBH8t5SeEbL;7P>4oqx*GP+eSz
z*LHv_!mOQc{fVj(paB(?a+xBeX4pX%5-}3`Zvs-6ii)7u+2%fypkt_Pe1NFAyTer}
zrVR3XHcN>D)lm8@8YShTf~;aStFO{oDkc$pMsC{@F?7li;S+JNcRpFs%F0GX*r~0e
zITijDEBkgF$ARM8_h%=P;zMb$jHKgf(n8xvVp_B&iIZ8p*RkiY%ll}(t}Rw9Eh}NF
zdLo9>oh7^-wC{$$)3z)qs*d$+yw_XicSKAAH5T1IbYm9@lD3;p%adPEM>i?wEG(rB
zB_QOUiJhN1_)if87QdGDtT0|(RN0|ovGrrXeaNJ?AT4)tJ+Y17rz&WS{o3H5KsnOu
zO04~}J&zX|8PwR+7Rg%p8+o>xml3<5fUod!1L=j^{O=cWk@x1^#8)qKwMc5Vi-~vX
ziU8VXw+@QGuwOK2yYJ+1fTaX(La=5P$!_N@nDy|(J!mt=NPMObyz=P^OEV4iL|zQ9
zY3s@U;1m!R0uPC32lenjEtU^grcBnP2>`Y9Ir<npm2<&07{7+jq+_SH?o*dQqn!eb
z?N{VNWJdXR|HcQXZN3VP!4ked0vG;OH@QfL#Hv8Xs0EZ<`}p6KSh-aczR=nq`bFM=
zS$ks#_BhG7W}%9a#>oxcG7r~3W%`uEJN+2dByy^OXJtF5K@W6<IPeFo-HVEp2e{_q
z3HUnv;;RrFKLYi-mw>yPS7&y+Xj>*hdZs~;xh@jWFA4hK`-OzhEpdlsk8iKmnxx4#
z!f$~&CETXjCXwpoH#bKlW7W|HgQu+AKvQ<(<F}~a$`npN1wt1I2l4-SIvS*V2znVG
z@Z%gqW>e|ni^Tf<TF}ocmcVs7OLsOYYA^wV8qi<Ql*4SZx13$0y0V(_p9FLX30E=o
zh$}Rtr*`6K2ZvkeHPB$*8__VFBdL4>?m2(M#`L_OCNWfh5)QAnDrZBd;%{GV#@BUR
zUkXSwZTt==8de-)i}w424LvJ@yG9S7)3tag*O0t^<94!In=J-=gMoofOsW5R)tC2q
z4Iq{O-0>hJTUZ}00L>Hkj|>jL$D?qISO7*bD+tIR3bzInX!V&u6hdqmaNqv*`ka0Q
z_IXc-0QmVyr8Nl40>p!~XbUzQ|6t^GW2kk6Jf3f%q%9AkmtUoi+d%^~-GCQReLAey
zR1X8!ic>5gy>wZ+m+upYMuvoQir}%WSdGCJlK*10pitthV3ewRaChcdGh^xNkM|p*
zjmCU?R_%#@Qus;)q!;DxR<^?Ro!BWv*MIzjB%)o#{Pg*poJ~R~$O+Qe_&GT`9$BOZ
zog1<Y_IQdZzJ|}JmSxr98^A4A4k8`D**gpAS8gjweaCP==nM1odWTfb`=>96yPKXr
z;%HAs!rUxXxwLa9o2@L93Wld$A1h@)j^*Pll9bn6Cn1@bt*t<c679!amXJl_V;<6H
z$t)))S<OS36@==5+GYvqSYrAfjP=gxcweqyS=>Ivr04aB#LtX%dvnD-fus~mOtFc$
z%r!a%cY^$!akyJSZ{>m#6;^Z=F<FuU$kr-z_G9A&OUlXIho}<<0l(RXBNt4@URQI1
zcw4De*jeXHcQRwP!nmXd=aK|JS^{@852L|@e-so2xeOF+6^Uvog#hNpz3DO?c{}AZ
z_<5J(sJj6#!<skxBl9yhNOtagWrPTdZCBV3ZxSjI5*_5QL!&E0uN>dPak0m{RkQ|>
zxxiD8O(gtsD)Jc+>`YYZ8bu~FcDy+F?4i+J&VPc~U_PEn{}!2sVrq|$()298In(12
z%<u9G*J4jLJ~=o6>=l7GZ}2#k&rRykVWKV!J1{PFtgK@lP1g^t<*geoRcmxHXRhvh
z?H)dVOEpLSy-8;rb-qiGC&9!bkzH6_M<=~|KHi@120dlD=A4Q2I=Q1h>cc*@O;_+%
zE;Q0(=Sg(aQ-}8D#RioT7J;O06=DF6?F8Yj;&gN<s2DLEeKE9BW^=&*$o#9+#=feP
zXgBuGr*LVpwO{v~w$!f7XCv3s?i2C$LYtQKc3cDpHh``8d^qPyS?V#wBq)R2EAyZ|
zL<wA#m3jvL)MwYdfV$z3*{S^9qE<8f&AL@y>McN`P@?^H!tR}=M#8Bv4ZIxh<@aR`
zrra3f=rnchVf;xZA45%JwC!M|%gk}wbh=~#LM^y^o`9}w+PX39mK<5heL!4-p(j+w
z+}7#6fG_XU;Y9eSZs}4IKAxr=ZaJ!r`l<GTubGyYx$zJzJ~U%=L6EfzLB~ev;bBKC
z{88`=LzVd5Wrn<?5{N19+fSay<F<B5We#+TQ4jQ>qwV_1jV~xQrkU<zoO_Hxpusnu
zmZRqN3r(0UiSEK;L2-)g<6fCgn$`Q-tHqoUs9AOCKS-r$SFf-Y+)u~&$^}xB%WM?u
zhNN(-Pn<}sAI=1#wVUZ8oqInodi`DQ>60(!EIB3-G()E}xvpe=nLn~^T#uy$nFt@#
zV|`^c2MZK{NPPTBGr$;G`E9Pq;>^<3p3@R<X;{E(oZ;obIuhRz(vgtmUXyu@dZg70
zS21i7!)Xj}D*@$Zci(ywHO}x>@~t$yXW_cVoebEYylr(2Q>%Vl{coK=-<sg8`N_+g
z^mvy*yjOaVk9ZMrdx!Ol<h@JckCQ?g4>Nrw>K%tcjofb2$ixH;Z_Krm3Cmx{NHSO}
zQ+5`r1>4{sDH|n`Lt#6fHsVq#^ny*O%|T<IckT2oWn3icd1sg;S~D*vqJG)gcRwIp
z#5ceB*1_m%m8(F?=vUluLXY;Ud`Ud;3@dZaQS^u_bd7HNnSO<N)KF4IL#n2|sSvrD
z9WPR?3H?EhEE1oA^+AyB<u#N3gP)!fPe|QDQw$);MO=#Fh-3_nHMMoL5NBMtBOmFb
zUJ&Cyk9FVR9sD3Yg{5~(2`xaSjj2FUTbC82x2nz4pkWeKI%J|4pMuizb_Jx;Xeo+j
zTcO8WyL>vXdY;0d*oiP3%Y?z57O$1J*c&(cP>(jW99<T8Nq{2pr}wE=Ebia3aF<<`
z{;EBK?ooj#8vq!?fh2`B7CZvx*$Y$Rxj5se{}2bEeYsparHkt6H%s9c*vnGG&{}Vi
zzUEU?SFdLoQhu7Kz~vRW01k|w&;WE5t;E~!7kh&PFt4iOqmHfBI_}}oup3^)C6Z~s
z=~2hrMH$PLioQi!tT(#EhfvjMw@;l2wYl7~;KDo2m!NQmx~csb_hu936^@{@eF?Sc
z(_ynyUg11{_VMUx@;L`%3*mzD8#Sy=5l-9n*Y6i@xqAkLy&}t<wOPt<G;?s4@9_%M
z6da<TA}1@c89xV)ne+YOl)W)4OnhDz=x|=u-o}2Rz;6_~4eH?-tz8C0^r7r6G>*bQ
zLyxL9OD3w8{QN>?y}3gDRZ#n&7xaK_bxGuzxUTDt6CpYLpJJG^*s?~7eV13+ah!>O
zX|J~u0T}=`k^j+h+UQBLKA(VCM;=?l5MSLg9|1j@j8wtF>!g|Uzn7X9D4e?Z=l4<7
z53?90j!^A`9x0v74`SMy`5$=8hP%4T=@sy2sYm91(7qac!x!Ce2aITiGFZ_+2nssT
zBJLza_^t@1R_>B%-s{F;>=q_HVV{@r*4|A8D1~-lmPGul;Lgh(3W?`!aOL3LZcZFY
z55pT1z|JbSa$`d$SNZVk)8J;lg)cFF*=pI3y^!u->#96NFQ{xiRX=_+iPfkbCTxM$
z4*TJf7ujtJcgW7Zb9DWvT-|sqyfzSE8Z8|kD2D?xtGAr4mg46k8Q!8oN<;9L{#gd7
z{uZI-bsyE43|gj=n5Z@}Sf*jb%<O(anL&Q}h|uSEEEN-7-97vd{)m^qS($C8^e64C
zsc4QXTnG1zZHaw;wro!gk6birPV_IUTsll;tUQm`eXEzwju*R}`3dCS4bO9(SR5-2
zxn<1I$`A3``X54k(l;m^>4Lu!xjO}sEQ@Ggzph=swW3b42?Fv=`D<+61u|hB>Ci1t
z5c957n{i$7s<+G>k!u~^U^5Ifh3+7C5FgDKy7?<uK`{<AUN;N46Mn2RS*JKm!<tzY
zp`yF40QXJBZluoNhIh35xu&*LiQU)S<Wr?yGp=Du%iw@P+d!i@cOMy>y!db~zkd!R
zPtDZ}+OcJGd!nDv+0(9@2(JyZeTB*R3G%q^XpuoU&2rp|XY(%J)z*oWR(#9TEc&n6
zc~Rvtw@%Slb(<R(#_)ZXmA1QnkFj{kE&LWsl9>9>HHfPYj`HX52;bP@=ZyL-GTn{j
zPE(5DJz;C7Q$4AowYHtSNG^wC2+sSrl&1|l@atXf!x#$<DLHT9o5N$#I9LV*^}zFy
z^pZm5OZ#N5j4g9=R>%?&<K;`3^GamP-P|`rwA1O!W%w;;Me<G^XRU1Tg0QAy6Ui1a
zMGeA1Eg1mGiKED)$k0(|^9OY}>v!0`3uwuC4WWN~bP{*h!ab+Gx|p|rbcjq9ir|N8
zFByFe{2H&z6UWloH@^+`O4}uVwupSg^piW8WTxKN^Px^!u*dvo8qo{!cfzL~*`=RT
z)W%BP{#K9~BSEmEy?qKplSd`=zQo!NaM_T08*L5!GQt;Wi^>q{AiWyK<hK%VhJCx}
zYht4AV(mv2YAUd}nbl^Fsm-KTDjd^%-nya~e3FOMoU?>DIa4;qoN&NDeMBP4;M)y7
zW9Kf}G_|x^k+R{JXT~AS-M}<D5b(S;594PfMkSZX=)8T{tcmUPXibS>?rpVte?)YN
z%HnCZbXKW@cJFpj+J|lMX@zF^)p7JfYV*zQgSS-<DGhN>dyTN33d>udJMYa2H206u
zf~U7XX6%$NnJ4mS+|1!dMN6PCwhKR^XdZ)9X6uffC0@{Um^8&JsJUVWdxD&<w-)PU
z=X9A^TBjZ(c$62Z#!ik+Yg%GRg2oDq_C)t2KDrPrSZQnA1k~^KKYTT&NBlkM*nqex
z^UqF&`w1kJcg)dWkLn+Xj{Q207g;U;Flum1%&=1?Svx<)EzGXni`$!@^#-E$O7>Nu
zQS^eb<PXY^;UnHi@CgIK%1wf~<Z=byb2}t(KUw5uC9}^v4CiCrppet>fz}qgHir*X
z=OP;rm7y$Lg%|X;KQ^v{yX%^el&$+Auq$Ht^j}f#$(k(9EWP*c3OL}orW+FBY3Z}>
z3++VXCzZx7yDS9G!!y5f)eKoRRWRa)$5kI(&W5;<cu_s#a7mHnVRD0?8jR*Jco<>H
zDc$gU0gE_-S4fI-Hr?+Zq!8w{f7jm-B}g*1X$j_~kfso>D>QJ()x}wY0zxMysecz0
z_yNE82gxD^0eGaY5>%!D0*?MI#l(_SBnQMtw!IAca6R$u%tD<efm>ApSGXnV>OJBb
z-2FotY{A^pWE6AnFLI1wc2%~b_oTOC1KW93jrUxs{sfIGFWY7mioWDXRQ?siRm~Oi
zdDp2a<V5~+q=$fpXz;JuwV+*+l=;k7p2)*zM%;3X8h26{>wJ3Q`DTIT-V&y;Y;n3U
zd%Tc@E8=C;viV|f(1Jm$%L13{<(5_KMRe>`)=p!>M$>~@n80wlusBWGD4F1QdiII9
zYczGmM5b7w5eYhVEMSEP_*s1O2tsXpB~J?D_X{n-R$p281Nj6bKQqM6&{s;RXNq7$
zu6~mU6_qx0ZLz;<yOl=uK?G<hsAkV4>CG$rhxh=e&Ht`d1i)u5|E*QJx8YE@Qadok
z{R_$YooPIU&;Eyklc;mM>$q(MEH0-UtoJyZ(BprQo@w4Uh4yv?4afiHwY9RIu^RB2
zZv?UJ>&0t2eZ31+-i{tzv(-#Rr6X697yjFgK04gA+`n4S@?m8$SM!WLblpJ+Z6N2@
zZLf}W{7Y&4qEt!gk~tzI%&aM;-<1yE(@_0pbD8Vh57ipHmXfA#Tqh5BK629-qwmI&
ze*&%{pKv8ARffpfo3BnM(1Mtz%xu@l0YIFcJ?D&X+(twx7kx2H94;z1R?C>v+sc2b
ztDi+hulGm%c%PRExlHj(1YWxVR8}VTQXVJ2@Rdg3@IQ=h8Xsl+i{K2|NXFM9`C9K7
zC|^;5zaS&j)CfuN(d?2OiFnc1B4)mS+?15i<HyBXte2yQ$Wi~$)B3h`Cwn|jGUm;%
zI?*whu)a%TxVAJ~Hc096>43y3hQvoZ)PY3#zTZ)L@5Kj5t_X)+Kaz36?s-wq0=KeM
zjEvWxb_kyU9#C!9HlEGbe8KsqLVcWy&E7dQuHjlgIV(70pBOVZVVTw%B%8Y5bS)a{
z6QatZduzwptbg72qc-s!fc31Zlw)-}b#^p01biB)5!8q;>O5iV_4Q}7Sq7n8?z(cn
z6kmGvj&DH-61hwk_P1>riDaN2A}J&rU&#I>Scdk7*c>+^3A+M)-(lu(o9!T8PSjY~
z^Ne8ZorkydgF1SEN!*?GsCd#x`J-{-7i+t=yT9~(+T-)NF3MP`Ey0iC1>mN91xVbK
zCtmVlcuC@=+~s%4rg8~0y0{9#)oQ#kPvl++AwUmvVi>A6vZ`=_+@-tLeXm3qi#ap7
zGE{a{_KM{3cpCdqW!cT*n$Osz$t2bxW>bbp@J5k;vo^qdT!)-Ru#lwkPW#g_?KGE_
zsl=O6YGeg=*a-~lms9*x<7VT))xGW{4zl|aTJYG>Nf$Y`vEH*?-k{+&=g?#k<x8q4
zbfDR*UhAIqFonl<S-sQJasA6y;&}y!U(Qz4L5<~y=4fBb%wIkW{2{~Tv%8Oun328!
zPiHhe*fZ*l=GM+iM%7Q&k5yv0t5Tbwp!0SfE;cvyFEHgdvn8WBB(uFl?(ZmY(6hcA
zYWQ;oM#Pid{}gjqN9P-#ZB0>QFseAP{xOy9d=D1ae=SwIpc|re3^$7DXNM>BXCJh9
zI)uWv&s<AQ%f7~n?tB@^;nNVH82_LkYzT;=!acE4YE~@HmWN-5<bUa5ySXZ@iezpi
z7S3t0zjOcOc*W+HEWumAsZg8XDLuOUeI;3PFAna1eGKGNkuLuHv;mXCMH8J@Gf&Oe
z5!XG-CuYWyqP?Z<rKMNT46VZ1Z0dMt;Yq$G*1-r`^_vGgip}A*;iLnwi0t@4AOMxN
zqKxaveG~ScQF+?EiqT`rg_*mI#TAy6nzXbub*SzP95SI4+4O9cqBK4EfLG=zfu@@G
zJ=VrNnuqkIMW?c#T&OESPp=b#z2lKEfLhjpdGI3fd2Lf95ftvsy(7#t-Ira`-Z%0N
zmGpf-1$fh(f#K*f6HkU}5VO|UFPZ#?BIvrvi+dwkP@;s4x=}h0N3aDB@o)G|^1f|U
z)ZOD!Z;%ZJYZ!WC7BQpEN;xDvM}7)&AVH3NzD5ubO<x<zOOhSPxV&u}S_;ZghuxdQ
zGTHd{it;wgDudu*%*DE*`NfaLPKn9w(X=|}1}_v_V(=V&24W=uu3NoEx?*dNMr=3j
zioogH1WTn-`m2WEG$OZo+XpPp>*82Bjo;g9G*-!nmhX3m@cOdaC#aLv`fnqZ(bwhW
zs6UgrF4gU3P@xgNQp+0`;(H6*DjtWZXXNL|7{5t^b9{9(u4MtYi<FzNsMwRJ3*!96
z*^`<L9%D9t%^$iCYpya64Rm?F2!dS{xx-DX2DauZwW~~8oUMA-x7^lu?<CY)y5m3C
zqXf|L4W2+jvUMgRoALnc1xRK8N|=WH_}SocpHTn?C*sikkE^G*t|?)8sH%Q~!`<eJ
zs}7l%uUK>d{jy@(t4?|$&^_-uC)DJn`L3X{;wRMd>KM6lc9vvE4yA#k?zt&J)uN?9
zY$c6WjKW72MM<@7K~NS!%jW<?MmwBZ`9nMdA6>@OQXvn`aR>UecF~-&mH9y{`-#jX
z7iED^`iq)!HGhq!6Surx{+KqrZ5TEwpj^uJ{ts7pWi*GpUU^Se20boBgjoW~Y=CSg
z{Pwv9c(lSG=s!E)wt&JIz!;fJ`}?46|6vWIp#x!fKqwtxR{C^(AVzP2=`nJrLVJM}
znbDuGVo<6x^|j`MdKuZFT<{<ufRu1lEtqQN-&d8_iaW8Ld5J%3m!=y+SKP;N1?1yD
zHA*ZHKTbBT14=AsLVmS!-{r7&y^9X!I}g>q*l6XF9AhnWm1QHj^1r<c(D05w&94k2
zRzHwS<Dh?G@a{$<jcBHNK@s6DKkQ`n&Gqw8Pn78VYd&-W?rG6ZUc&_Vr(>;V$fSG6
zcT~rA=@(T$U)XXaK5k(l@s2qhYD4hY<7~D=6Ks*b`G9ylCdrI}sN(+E*y+cN8C0EE
z%57QJ6(v~YSux2gSWIWuZfu&0was#%YtzDoSbhi`ojNjWOYoWTqg^dKApY@#QHM$o
zc8ybd{Mh1lb>JS3szQB@9y7mk3Z$)d#H(6;*BCB}#cpJACpo;tFx#_O#?f<}l#6Mx
zM_SsKe`C2D8zXaKIr2FwrkrZSHO+61uR?2VF`^GUZq~1YAUO_-g^oHU9G|?|FVjlp
zap3-fgAKZybGw!Kr@6>R$?#Ja$7hEVOh*J`_=mHMyy#X5WY`Nz+l?ve_gN?onxmJ>
z3F#zD38lsWVGt^k^6{)0?W`c3x=M~8?}PLfB}R~K$8MXY3}`WpCvn@v39sJm_V`{+
z|Fz(6H`*b$Ev=;2pQUJl;HNTMd;Vv}tk082*!KRTwplF8{~-C66;JIHlHEuq$Hnxi
z11j8(&q}qm?3tXD<c0!BLeO#THiBQ#(iEu-9+}!LRw$IqTMm6rr;00j?8f9@G2Tlb
z>*D+j6&&RGz_Mi&Jc2~Qp3-L(pK<h6$5?VH1eKT4p&x)hNv9-<J{f;!5wiT1!2CAp
zYdE!kyz61aLchbqEN4mwy^^kcsulWlnMo*d_TrF||MQp>9}*n)emaBV&LMjbo1*)x
zot<J#%`%cJ-gEDsaBHSVNA7a%@6unYjUi(4q{bL+!HCS@a=588Mj_<|rrjI%z^-Gm
zzkzPzH)Z?fzugGxlv+gttQ?t)rQ!u=1{OnXemlv3QC<K0qORDtWY0r&)hKKDop7zM
z&!^2KYUW#wNEJ~aUTnuNYE`HAhwX%P6M8m9WVxxhxlNX|3|dwfRnw{V$+DUjx`#Zn
zVg-GkYLCjRd;^kyS6JcDQhuc;L(D)qjJFs?yh{DAL$tO}dW<fK;F~MO`2BSy{<%*c
zGuLTNcF!D%UTXja_^RSmcUt~<q(SBLK%7HHaBk-Is9QoqaeZm=u**Q?Nnc?#R_2y!
z^pEOKbv@mFTv;A=+$7&^<?v4AG2F^u{C+1{U9~DnhURcM890gC!s@2Hq)6IIlu*Uy
zr$8MCqgR<WYkJVgVyA}3fE_LRL^-Hn)(mg_=5ZEt%&saq%I>$Fa&WdqKQaYRG-8yo
zb>3Eps86l-?FVwO1A~$NIR#@)xXg!0g+IjyXZ=p*AE|DehCpD*qiarDikSpy`<V8T
z>AagVv%=O9`wEVe`*)VBK1#H7-}?>2g+>D`PtRF`YJ$m%__$Kc^`C5_O9R5!H3hn0
z>eqRB1!TkSh?#tOApVAr-E5+&|51Fk;GAMRu*hVFDfhyp-FMU-iTJ{nPYdb&_77-h
zX)bQgK5fgt5p_RU%Vk9VrL^21E4iXGa`L0T=fZv^YsGQ+52dXm)TRi%j@_`vZRuTw
zIjhb*kGb<PkIPFj7ZTC)*wIw(GUOrpxT1C(@@56c(bQVVDO-+dt_&p{LLuc$4oMiW
zUoRygSIg`&y0p>QPt+PINf|S5-;$-~-Z=bbEFn~X!;9{UPX12bWIrL(YC0#_px}5!
z>f4)Su2^H)-E24Q`Ne(0y>5(dhldQSEZ%-Wb(ie<686qgJ2q6h=&6XAQ6i`EQgd;k
z=21$crp{h`5*{bmgtimg^woo`He)NWHeKJ^#V38}e;~9@H%5fyA?RN|DQB?_^?!Y`
z>+>#yOdo$?RIsWS_jLJ!O$|haB~u9^DPSQ2jEZdoD(>$;FhBoi2<epo^jNn0peq8P
zG<5y~{+Hn66)MEfdXc+#nOk~=wLvbom6yV-2aS>Ux2#Vu_2K3!@=A!6Ns&E9KpW=w
z8o@PHVF9|r|G$UpO;XIQc8oyPdZ6P?&~*-=1l{kXSVrFL0NAIf9P867VATlz)&IYd
zfBa#^`|zp%8Y1=dn211Sy<jD^QU2e1{J-5F5t&<Zi{j0XbN4|r4xq~u{X2|~fyw%`
zB}iVD39C{@)^+(Q?=oGg4T}shNk97E(5;U8tICJh3($2jpas;dIwy>^agA7vj~2gZ
zyqt@Ik`hS&(DwjV-AEw9`3FgO8+iM5T<|9~+#6{H(WPO9DJo16{(~eLl4eF034-|G
zmu`ZwDqhWUwLUzTh@2M;ZtA{hw(_t;yy!=8&1nATRBR9B(0_lb7XW$3|K|~$v_-Oq
ztzTSn38O=`Jz>MLBs_l^xJ=<Bk6QWUU3fPm5c$r$;2Wk<r(d7XWE!HjP?>PWd(Y~v
zKEfN&X(B`6+DqcbWNyRuoDVag<7YC&&l}JkZswNEb6XA^+Or1ULS(2sStXANP00QK
zgUp3$mcObdMqqg@;I9Wze7O}8s)6XkFvS1d>L`Dj_(QD3hve>G=miySJ1q0>;U|;)
zJ6dYdN9g=Qe#nuJPVkZ0F@wA5?@22=*G+SJ?Hmk8qWN3-@0WVMPE%YZL+^*QuTES|
z8$E`}WEusWq;gMML!Lk-x(jZfdlTjd(Xd8H0@)w?_Pm9xYQvPg6&6fZE6F@q*bjuP
zQtzD1oOlAQO(@$+se=dB0R^Mxa>VZ6nY$xfE*nYBkAroIJBnbcdC$mcHN_wKAaRTt
zXv6>1A-$F)36emn$36rr7|~|13SoNlJ6oS$fk4ifyCj(6sn4469gm0lN`1JQ9cAi}
zmKkaY1Vw6UGNXEaL)+F^b73foQnhnELC{uc-?Z+doq*jK2j`JT<YcpCB)9Id-?7>o
zrHD2O`p+ia_*FxRpF6xgD!)@q&&j+k7X9em4}T;b4HcpNwzVxJ0a3J>AK(jVzq`6E
z*P>4>D)O2Mk#R*vr=0BPNpUP$g0@OB$Wb$3YyXs<k+cW@F2x_j#zz+YH8PL69cc^R
zRgu6g6ap9XaphggVj{_;smQ6w6cA=1r{vgL^h~|ZdbOC#{Q<vQ8Px$&8C=II)IR*y
z=NNlT>Kufqv$7%$TG(Ryy4Ge0EaoWE*SgiKedU3bGWk^QM63R4-1S<x8;}93MokRd
z-9m68kHTE<Q!(wO3>Tp93(qQrZ7nVG5_ZfIq&Q@#2!9w4<4w;=<-74b%8rqB7fVt)
z4E5u%<hiKC!u(~Ol(Zwpz~Y?ZKiNnv-Vu$x%UG@RITp_@T(ua_mxo(3;6->o&bZSo
z*QIzZrsSI%S4`z!a)o_9F@zvc2@GHE+?1^lG?*vk{>8axFBiQqFwT^{%xl{*4_TB5
z<rwW$in025vTq!!Q|e!*YTD+zhu(3`&D^tUd_=-fnCV&})x+=Cl&1A(y&uo#64dh#
z(z-~7l?T_!Kw6plqvzVPtO>Gi#MM&YGSJKbp^T0|g_dB7Y3`-&2`Qiw`OF6KTWji9
zN$I(?$(n=*i)kNdppLTsp+p=+l!2W(DPAf_91DYa<kBP2cYp`6Q$n3y`*nH6zXHd_
z)|u_sc7r0%)cwhCVqSHfszKD_4j{I4bZv@J>U)<~Qlh?aR9X6WxU$C77wB!)=KNxo
zxPwk^qwiC#MH{2YiRe!Hf*waBJ53`|>Y8?BwB}c<z;{yrr>rZFhkA|r)1@Ryid2>f
zS%-@vT9_pJmR%{Mh>&bqlJT>Ia<eCE#=bN(Ep}P5XB%5~vaez6GmM}2QQiCA`@aA9
ze10>(Gv_(abI$iX=W)(+yx525#8!sqTQ*R42Tih@3FHy6aRX02t0HwL<+DHNIyvH6
zmbr@gOS45jGuXP<oM<P|bC@UT^kL~TF87#>;vtkuA+6cRMRcfCl+lCUj)=|RSNoL-
zEWvz2?5a|S1B2=W)LDv%@`_hYB3oE3dN(dvXS!y@tr10Xu~tWt&86N{HtTU4j#xZ>
zxyh+qL;xi=&zy+xal#MuIq2gq0&@5c<DuK;gnW;(C%XvhH{vSD(LnjJYn^kjb5Wnb
zN;BQV>xS@%N2=<*D!1wO$_S*Qa(Q7d`--K!UBs9*di0Jpa&;91CD_be#PK^=X(>Y2
z48$XYF8qY1>>|XfsUgZR>u@;8EMWV-*$*v)`<_cX(9>g^@U}M0U(%@>htW%>K8FVS
zqCxl_Jqj!Hfy9_qRd7eoU*ESE(BwE!5>uj0rJC*{z5@qX(__*dXN}bjTmd`FM^l(p
z7!ye0h$Umwz>U$hOFJ7ls-^%HwIBd{i9tUHX`9o+oxZMyYz*8fjfIF5FXV(1jzm}^
zP{?H<dAAdsm;%~oG-|(DLgcu)*-IT;=}eRIKPhGbg|j%BPJwZo;&KABWhMLvT2jgu
zKFV~+T+ZtgPWzk~%wI5jW7XFyVJ(f62SqVj6-QIyJz-F2@3p}2W9{pRo8HYIlXw0&
z)Uk{3$D~5UZZLl6_%7o4yHM?yQoWWl-njatTo+2nH`>I?LxBOQRPaBJ<|Y_YCxMek
zkOlo;stjDfC2+hKovSdg`2tLfQA)kpqBHM9F26`4vw|Sx`h$J&@&x?2JX@Wju<a%R
zgzKQ98&>tn3YjgZP|jH9`bLY#^J@$V2DbJAmo(sedHo>$YgKuQUxB7^huE-#>aKhn
zef0U{wyxMiHN>2(S1QFoL=-uBoaih@*#RpX@SyuboNKVP)jf23%5z~3gt14$zONSr
z2IC^Os!+YIDxX8-IgZ-Mu>a^WHH926iadL4G^gCQjtqWaxS?viGAVHJqUzA6L*3#b
zcIh|xbykXy^K4W_Q|~AWd#1VGom**h^o=D6c(EH;8_E4&j1Veij^79M3_l#k*>P)h
z)bxRr`!}jV2Mizhq-$jLd+67CFU0w(%E%4BI(U~yL^(1)v}s|%aO_OoL8?v8eF2%U
zl}m@5@Vtw3laI>l0uJUG?=u)$vEg-0Tnkb^t-vMZq0^>EpR}JpMw7NvH3<=Cou0-Y
zRu`8Hd5D6GN6v*;u5D)9i=7w}7q^h5OLum2_Gc0y);n3JfAfjWv2n02&@&xYxo+E2
z>?+IR5dpQL8rDl0l}$@8MYS|PxP4E?HG)U0@d#u(RWjt^oQ)AG_E)>69TD7cdl$jh
z^^IQ6rB*#|#wK}iOrX<0&qTh-O)fuoBv<8@O@&zuJT*a$o#Ep|OVszU%I=iQj}^zi
zdT@U*4NntR{ehG5di1$ZLimSna;R5w>Gy5S+3))kV?)R2d?+-Oo>MXsDmQveo;bnW
zZ9mp!c!%6<2KuGuRJP~jQfZB(@JV(Nb<XVGjad1tj4j4EE{<>G&vpVveSQ005U*;J
zaP{f-gPM7EdI9!rZc@`h&Rteljy~-k#^umHPyZ3@&!P5&Pnm>+6oymM*-Mjj+sblm
z&}N@%l#e$YswWO{4v=6-ZOXD>n>EKy>{x5*xW-EM&eYZhGPHXkamaLQTCYM-#E#We
zB~CM^(O9@Hfg>SIyFkm9e{4*>H1<zE-sZ4HnGvR|vfh=sqg?vt0!}>6CLShs?TaRe
z1rdj*)RYg54`;ux(3XO2D#+L2`B3|-Tj&P)$pG8$lN`b;;gc41<CD{CRN=JQ0J%5W
zc5g`cz6fi%pZbz*!QPQ?(>(33ZOgDOc+9|0jMj<K8e@|&*V^BDu$#eH?|E1P8yfpU
z);JON-dCBkX=?2kBHUhm^JXZGW93Am48ibyXj9?!+5Btpwa=Yu-h(!;+sFOR2#}`A
zE)05R%9UxF-<%7Z$?ZG8wk|~*d&RX@L&<sehMMENZg{c&CySAG`IIass}^oa%Sfr&
zL>0>-CJG)m4Z9mXja$VJ3}De?p3z3SPaOEAFLXG@T;(|85q?{yJHiiY4ow;*H#*fQ
z2`_Yj%b0{RB8>&=5|JSFED)9ld+!%%a_0ep?9ifD4g+koTMt4vm#-4x#(Ibt3yp|T
zgU2=b-&c&DZ$nS-f)l(mUjdqQKvnmXAmT3K*H5JXBw7Yd-N!4Yn=43cu+riKDL+sU
z`WpN}?;<D%0GNl8HS(AM=!0Xba{%beKd4iphtWSiyHWSrb%0L?({j*cjmOAe?}+{4
zW1a+ndyf$W^GA0JLFcyXccMXU01m_xl^6<p>`x*|Bn1Y)ut(G8c{dp%ZvG-DOs6(2
zG5w3jKntdm!0lgfyOtw-L-Zscbb1m!D%Mspy$wXG06t3Bgj`)ccB2C`as=x73WP1U
zG3AL~75fbJxi3*xf9>XHCB9hmEQTC>GXLz49OYv*5~qbj6;Hl9MjTTPTnMQz7h~IT
zjO3kN>Bdn-aP($S%>Zcqm%ir_h1YQX822j#P7q_<{}p3=(9Iev-nT4Jg;@g;##d|L
zV8T2#NZ!a5kpHmv7{uv;X)Y(hO@A**NS<yF-!6hdx(d1W_dDoOOR@s&3+7rB%mTCe
zKnHw(Gjv;w{eKzJ+jV98g!J}KUEtIydFgc}v@x^r)mLOINI&eM`2cQAUT;Fz5a>#x
zE>+OMZ}x^OoSRvLVXSv?6m2bI$B!t5_cbT&8=TZqCC)1Uzo}ickq(vN4d6t*2F@sc
zlxd3r)CT2MP!+k!v7>*WCqO|}1n_Rll23n&1KI)oQn8RE#1jtY-i_qAqVZ+ksxMzp
zmx^x|Mg!$cqN<SIxqpLJOffJ&u3&F91-8%sYx277JqWz_ic^=7Re|um3W^vI{`d`a
z4VvNg$#0huE3Bds(D#2h{xgB;ECt36spQ~e*wm@lJgbMX&F`2-=WlOPMKJXH54m^(
zn9$lAD@zYKd{qBA<x<iSxuvZu6``a~gIS=1wg1sU%SqFFjY=m4w3?m6w+zU2-==dN
zUJuKVYP6Eq10?X0-t154nx8t91+M0DgMr5EBEkr;I*93NI$W|bV;J{tqmV+<Z*@qq
zPU{iz;w9_BzQ4uML^FG$K*)@tM(@o@`oi7_{$mB4@@f3V&dY376>BU&XPy8RvZsn@
zGeB*6K=CH`6z{1G)Li~=6IH&UX;4yn)E5uNAw11belc2B^q1~*Ps1K4<n&!}9Q|<u
zZWF=Mo^#$sgnSe3D*1{X-HH8|>9L)sHGFMYxv1$Z-4pD>uibPS7X=YLlAn$Z$t`Nc
z=bV`-We@GhDk#fH^y(x(v?sl@>5XZ+z9to55PYjd7ufGj*+B+}QYbV_E-LV>9Xsn?
z-^&K8xz-cTj==El7VQYr6V{si&kor+R^db|Oi_YHK~Li#%W?E{*H4tOt9jbBe0wU{
zx+iF+WvCqL_6np5zv(TE(yIR|S69LcWcJ^Q2YD;FJ#HAvDKECJTh+R@$}&OI`>+X*
z(V^On`7Q&jeZ~6wom6Sa#^6c}DxB}<nAuP-u}*i!&llx-N6Ge$;FI>LnRh)};@a1+
z7w3as=m$!li`cIDIB5Sjc1_%WS<5{Zb-=^mtKFX&+KI<MyIc-zS?bAEdM#~pF%Yz@
zMvmWldTVbDevbbqN?vzO$7z*+U~|0w7ti293*`JE-(n@isJD6jHS`cZ!d;*y?AoW6
zG$P00M6=j_@p#9C#i5s8QZZYatM^}`B`$8y!%{X4wp$)0{VkH*btli_ed)B;-{z|4
zghw_>d|#cykbw>I2<AOWa~TpXIbfEYR`l{P&NFT{B+xeHv#VR&oAj2?NwPKlo{0{@
z+K2a@`J?jimJ=$zsG$4L!LJS-jnk!#OzZg9ZJKszcSM@<8=Ni@*==#h4HJ@H-2s{7
z^Aj-~njF_N4HHs|1@M|;{_#r$cQGa2ZKOinip$yD?qvQA4hI97VD0kL#VYU2irrR|
zc~ozQ3xD-a+u%KUW^1|o6MY)3Uzme!iKdh-Qkq_->{RmG0THA9x$P132PSj}f<i`c
zH{^4M0}3U-g>1Fjs$QU18H_N;J;V`auy`(&IBDTIeP4On$LrbeFW&AW<;hvTwUFg~
znXsLRst9~u$LN&E_Wt$ou<E2{hFSW)`N9ty@AAb{5sx|XuOF>k5<*>fb?(P&$eRhf
z&es~enjGkeljQa?@vH9$vIu1KeX666N^Hdn6~FC2QE_){M|`+<43$;6t(lFMwCeK6
z1|8A0+zG0)saKMnZ^niAq|fy*IAU$58u0I>W13dLwNxH)?JfK^d?#COsZDj+>Zktc
zq3ek4{3OBtC6SAG0!cZo4GYKbA`aRIegU`u!AijXf^wUem2AcE?XbZnC)8iG1UA%E
z`dviqGf(y;*Z08FbwJDdI8wk2a&;7(E`b1L4DT8#Sx!)<7!9HqG~7UV0fR8MxeA0X
zm#+c?V4?;K>>^l|soYykR5CPSjV8YxXyKMZ8>Lh6i^z?P%OI2hW%kh2J*Wz~=X671
zB?uOdgpcmf!Jg2{c>sq7g>f1k8!Q!3*lM^7faxqWqD^hqCSM2s7hp>RaMZQB$b&PO
z)p0N?+@2txJC3XO>H<0F{>y?FIO$MW@h1mnQHDJZ0OOVo3N)o9%lySAF%G+ke4xWy
z81iw@YL*>l7G?VVQm`N?(Qg=>Yw&`t1?w$J+Lh)0$Yk~ZBYFDWQL!WWgASS*O7pz?
z2RqdcP0x&%J^o7Stisf$Pi!%~P=<-dN0uHb_^M?$JeoK4&m8940XJBMzhD#S-Q0Qb
zQ_e3tnR|!_d_g6S5`!e=Uf~8+tfACcfW&_R7wK;W933q*(Of~j<AUh?^h;KTZ9U+l
z(BOL?^y)<n+0g4K*l;Ps8USl++d%-^2|V{ffYh-Nxn!<Tk3r{$5G1_q_a8EIQco2P
zd>2VM4}2P%PGNQE!BEOG;8OuF0~d|;<5E)BqE`q-=zmR9H2R1qSmvuZY77`)P!?u%
z$^S;eteDTNWG0fl?lO12<F$LYyN1PkTIalx&J9VhlvQx&-%Hsl=qXET58Ic{=(lwF
zwz{`$`6EqhWm3<VA3gz5Ma*A7gK4V4!!suCHO>tdx`P+z<SM3@vrV@yW5V5<jqhJq
z52v)v>@7CEWz0W1+j56`g%tJqrY3uHR7meQ+qvL+cxwdoD#*G;y-ZqLJa2X-f3C_}
zoU%qK!e~tsG>nYfnq2%OGn?(pv*Zjgi&mXI#q0H~E>{)%(Wg!8_GE_o!Dj5YO*G*G
zxqyl!8NnJrfot`k(c5Hg*at(A0Q1IrM@O=%tIQugiE+gA$9(@6ktuMM{}5?cSAc4j
zHbBA8w`WY*t=2C`u`P%Er8kTQ#TGQ||2C*eVm0xQPh|QC)Kdc{!!WYa^dzzA??;#O
z4R3?bhOn|&L^$ksEI+f%*xbE(bLJz3?U<19qgdxvkXS?0$$LgR7xO-qbF6$+gvhm#
z`^&al$py&$WWxi>onD<;%&+p)WA~k!X?62VT}S6SaItqiw3jl<@5*gs2-vAcoy)Q7
z^H`HTr+RV8SUO{jQE3cSXH|3zDUlFiym>ZJ^{v(gao$W*VXJGAhB_EWTgf^C1^bK7
z{PfjM^mGq$qrIXIU;bC-fwAvC@BkESNUDDh6+9m2Xo+t?96SE4=M(UTOY6CYe7%CF
z?m>rEC)I@dQe9%lPgfND0+&p#z|7KB`xSz~w0%>qY@Vz%hlx3S0Q7D33_rb}!>4C(
z^X(G1nPYgIX{)T?wmR60qVNB-2|?8~>*~v9yq(kb%WscazrQ|MAGJ=~ofn$8|9brd
zYM`P_-<)MbekXm;cmFm%H$xSCOwk&USKEbVLZV-ebm-DkBgOqcobL<`WL3*^>(aL>
z-xEd+F!Qi!SeEPrUm!G=k3!bw=;^fzk^pSQ%>My9b%C${oI@!cGF@_>t#=H-6bFWc
zFG<9G>#~E?YEZuN^ZHpG(0y#3Zu5sG81oAfA2Dh@7#9@@4mq$SVzsiQUy;A`0$nk_
zQwvNv<z_s4Oqi^uU`EzY&7X?t0S8;ZOka%AbQ^vOYIpyFS-$48<Db8DQZHuD>PJ_-
z?da6E&U9hjWEQDEB~#Rx69*07?T=(B!uNZHW8Yp7747TdIK^}jeY(Ar!{UUSn5{!Z
z(pSB<HM!K4cZsqW80OczdLMmcrhKHKYRMe(DuV-Zjj8C^G<i1^$I5lvJf@lzNUgt1
za>nl7hxUcerr?RD{SylukvPn9wo`kWWIlJEsKhD<wbjk6J4|MYqR*TnWR`l$JoK*a
z8m;R)TN{`9yC)~L^Yui(@aL%YavaImQW)sYyLy#u$sa`Eu69Yuva}4%#XRtMWwhlv
zg?b)J>vV82&5f>z;7Woktj9UI2@Chc?_9e!WRe)vJC_NX=j5^&WvmBpbJ_O3H(`m-
zO8I#7QK}1zuK95n$$S?5m)E+$XJIu%swF`}hkrh#9au~gRb)A=*<~3dguv$)vdJi(
zG7tG?Uwx~vck#jx)cwbMn^(S*2?wYi$f_Vgh0AYx_kQxIe|CCSiuXxPNv;PeF@cMf
z87Ep;bWhgZpmJHKAPr^KQiA@hwNMlHqwy9LQ)CH{EYN02GhcF9W6?2Sb>)|czw(&t
zTliaB!!5^^#=OYzFh8Gl{ve^QN8)z0tdo|GTLHO_Su(2$3v|yogkjcr!J9`@4+o5M
z3Z{H61cz2{T-6nVmWLxc6?fjHol3C?SSt7dsy@Q79?ZR~Z9PYWaj$^p5foX!YZkKf
z5&5zo)h#YvEAm|t9Oro~ddlI5{zXHs)F@%BqDSzv<?lDEu{1d0$HPe>SJ%+NEQV6u
z%^mSGnnI0Zcoi~>&h4D8=T7tjv-(TZ7uun`K~WY1WykZ8arQqJOi|4^;Sm<c@4Tnr
zfD+bthsuTSAk>G4vCMNp(FNI8p3uzSPKR-M5wh-*I!d*-=JmK=rLxK@BF2JH3U5ML
zMv7%<By}{7%qx)2WvK@n)0sC8(5!@RKA&IXc-r&C+K%zPeu$KTybri}FNrfNCE4cP
z{@IJ}gO6JooligW@l;ED`J*r_v?ECHb@dCa6SB^cdiE(n(<X1yjuoA?eA`TJAWG`o
U;?d~SCiZaXD6%X*2;c4dAFlZMg8%>k

literal 0
HcmV?d00001

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index bf276bf2b1..360fdcdfa9 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -320,5 +320,4 @@ def reader():
         img -= settings.img_mean
         img = img * 0.007843
         return img
-
     return reader
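
For context, the normalization shown in the hunk above subtracts the per-channel image mean and then scales by 0.007843, which is approximately 1/127.5, so mean-subtracted pixel values end up roughly in [-1, 1]. A minimal sketch of that arithmetic on a dummy CHW array, assuming per-channel means of 127.5 purely for illustration:

```python
import numpy as np

# Hypothetical 3x300x300 CHW image with values in [0, 255].
img = np.random.randint(0, 256, size=(3, 300, 300)).astype('float32')

# Per-channel mean, broadcast over H and W (127.5 is an assumed value for illustration).
img_mean = np.array([127.5, 127.5, 127.5])[:, np.newaxis, np.newaxis].astype('float32')

img -= img_mean
img = img * 0.007843  # ~1/127.5, so values fall roughly in [-1, 1]

print(img.min(), img.max())  # approximately -1.0 and 1.0
```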

From a09b41c5c9ce051c64bfca4d31ed96afd4eb0087 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 22:43:30 +0800
Subject: [PATCH 31/40] refine image

---
 fluid/object_detection/README.md | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 62be1a5b69..70d9d87c3f 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -90,24 +90,14 @@ env CUDA_VISIABLE_DEVICES=0 python infer.py --model_dir='train_coco_model/20' --
 ```
 Below are the examples after running python infer.py to do inference and visualize the model results.
 <p align="center">
-<img src="images/COCO_val2014_000000000139.jpg" height=150 width=200 hspace='10'/>
-<img src="images/COCO_val2014_000000000785.jpg" height=150 width=200 hspace='10'/>
-<img src="images/COCO_val2014_000000000885.jpg" height=150 width=100 hspace='10'/>
-<img src="images/COCO_val2014_000000142324.jpg" height=150 width=200 hspace='10'/>
-<img src="images/COCO_val2014_000000144003.jpg" height=150 width=200 hspace='10'/> <br />
+<img src="images/COCO_val2014_000000000139.jpg" height=300 width=400 hspace='10'/>
+<img src="images/COCO_val2014_000000000785.jpg" height=300 width=400 hspace='10'/>
+<img src="images/COCO_val2014_000000000885.jpg" height=400 width=200 hspace='10'/>
+<img src="images/COCO_val2014_000000142324.jpg" height=300 width=400 hspace='10'/>
+<img src="images/COCO_val2014_000000144003.jpg" height=300 width=400 hspace='10'/> <br />
 MobileNet-SSD300x300 Visualization Examples
 </p>
 
-COCO_val2014_000000000139
-![COCO_val2014_000000000139.jpg](images/COCO_val2014_000000000139.jpg)
-COCO_val2014_000000000785
-![COCO_val2014_000000000785.jpg](images/COCO_val2014_000000000785.jpg)
-COCO_val2014_000000000885
-![COCO_val2014_000000000885.jpg](images/COCO_val2014_000000000885.jpg)
-COCO_val2014_000000142324
-![COCO_val2014_000000142324.jpg](images/COCO_val2014_000000142324.jpg)
-COCO_val2014_000000144003
-![COCO_val2014_000000144003.jpg](images/COCO_val2014_000000144003.jpg)
 TBD
 
 ### Released Model

From 932b150a0aebdcdeae97142b681278b1679f75b2 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Thu, 19 Apr 2018 22:50:44 +0800
Subject: [PATCH 32/40] refine

---
 fluid/object_detection/README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 70d9d87c3f..151e903182 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -92,7 +92,6 @@ Below is the examples after running python infer.py to inference and visualize t
 <p align="center">
 <img src="images/COCO_val2014_000000000139.jpg" height=300 width=400 hspace='10'/>
 <img src="images/COCO_val2014_000000000785.jpg" height=300 width=400 hspace='10'/>
-<img src="images/COCO_val2014_000000000885.jpg" height=400 width=200 hspace='10'/>
 <img src="images/COCO_val2014_000000142324.jpg" height=300 width=400 hspace='10'/>
 <img src="images/COCO_val2014_000000144003.jpg" height=300 width=400 hspace='10'/> <br />
 MobileNet-SSD300x300 Visualization Examples

From 161aea811af22dc281bfb33291233336c3584733 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 20 Apr 2018 11:05:58 +0800
Subject: [PATCH 33/40] fix bug after merge

---
 fluid/object_detection/reader.py | 76 +-------------------------------
 fluid/object_detection/train.py  | 11 +++--
 2 files changed, 9 insertions(+), 78 deletions(-)

diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 7cf87573bb..360fdcdfa9 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -98,80 +98,7 @@ def img_mean(self):
         return self._img_mean
 
 
-def preprocess(img, bbox_labels, mode, settings):
-    img_width, img_height = img.size
-    sampled_labels = bbox_labels
-    if mode == 'train':
-        if settings._apply_distort:
-            img = image_util.distort_image(img, settings)
-        if settings._apply_expand:
-            img, bbox_labels, img_width, img_height = image_util.expand_image(
-                img, bbox_labels, img_width, img_height, settings)
-        # sampling
-        batch_sampler = []
-        # hard-code here
-        batch_sampler.append(
-            image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
-        batch_sampler.append(
-            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))
-        batch_sampler.append(
-            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))
-        batch_sampler.append(
-            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))
-        batch_sampler.append(
-            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))
-        batch_sampler.append(
-            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))
-        batch_sampler.append(
-            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
-        sampled_bbox = image_util.generate_batch_samples(batch_sampler,
-                                                         bbox_labels)
-
-        img = np.array(img)
-        if len(sampled_bbox) > 0:
-            idx = int(random.uniform(0, len(sampled_bbox)))
-            img, sampled_labels = image_util.crop_image(
-                img, bbox_labels, sampled_bbox[idx], img_width, img_height)
-
-        img = Image.fromarray(img)
-    img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)
-    img = np.array(img)
-
-    if mode == 'train':
-        mirror = int(random.uniform(0, 2))
-        if mirror == 1:
-            img = img[:, ::-1, :]
-            for i in xrange(len(sampled_labels)):
-                tmp = sampled_labels[i][1]
-                sampled_labels[i][1] = 1 - sampled_labels[i][3]
-                sampled_labels[i][3] = 1 - tmp
-    # HWC to CHW
-    if len(img.shape) == 3:
-        img = np.swapaxes(img, 1, 2)
-        img = np.swapaxes(img, 1, 0)
-    # RBG to BGR
-    img = img[[2, 1, 0], :, :]
-    img = img.astype('float32')
-    img -= settings.img_mean
-    img = img * 0.007843
-    return img, sampled_labels
-
-
-def coco(settings, file_list, mode, shuffle):
-    # cocoapi
-    from pycocotools.coco import COCO
-    from pycocotools.cocoeval import COCOeval
-
-    coco = COCO(file_list)
-    image_ids = coco.getImgIds()
-    images = coco.loadImgs(image_ids)
-    category_ids = coco.getCatIds()
-    category_names = [item['name'] for item in coco.loadCats(category_ids)]
-
-    if not settings.toy == 0:
-        images = images[:settings.toy] if len(images) > settings.toy else images
-    print("{} on {} with {} images".format(mode, settings.dataset, len(images)))
-
+def _reader_creator(settings, file_list, mode, shuffle):
     def reader():
         if 'coco' in settings.dataset:
             # cocoapi 
@@ -197,6 +124,7 @@ def reader():
 
         if shuffle:
             random.shuffle(images)
+
         for image in images:
             if 'coco' in settings.dataset:
                 image_name = image['file_name']
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 8b3d6bff12..e17a1c2bf9 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -218,13 +218,16 @@ def parallel_exe(args,
         elif '2017' in train_file_list:
             epocs = 118287 / batch_size
             boundaries = [epocs * 12, epocs * 19]
+        values = [
+            learning_rate, learning_rate * 0.5, learning_rate * 0.25
+        ]
     elif data_args.dataset == 'pascalvoc':
         epocs = 19200 / batch_size
         boundaries = [epocs * 40, epocs * 60, epocs * 80, epocs * 100]
-    values = [
-        learning_rate, learning_rate * 0.5, learning_rate * 0.25,
-        learning_rate * 0.1, learning_rate * 0.01
-    ]
+        values = [
+            learning_rate, learning_rate * 0.5, learning_rate * 0.25,
+            learning_rate * 0.1, learning_rate * 0.01
+        ]
     optimizer = fluid.optimizer.RMSProp(
         learning_rate=fluid.layers.piecewise_decay(boundaries, values),
         regularization=fluid.regularizer.L2Decay(0.00005), )
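
The train.py hunk above addresses a length mismatch introduced by the merge: a piecewise decay schedule needs exactly one more value than it has boundaries, so the single five-element `values` list only matched the four PASCAL VOC boundaries, not the two COCO boundaries. Moving `values` into each branch keeps the pair consistent. A small sketch of the COCO2017 branch after the fix, with an assumed batch_size of 64 and base learning_rate of 0.001 (illustrative numbers only):

```python
# Illustrative assumptions; the real values come from the command-line arguments.
batch_size = 64
learning_rate = 0.001

# COCO2017 branch after the fix: 2 boundaries -> 3 values.
epocs = 118287 / batch_size
boundaries = [epocs * 12, epocs * 19]
values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25]

# Piecewise decay partitions training into len(boundaries) + 1 intervals,
# one learning rate per interval; the old shared 5-element list broke this
# invariant for the COCO branches.
assert len(values) == len(boundaries) + 1
```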

From b0bfbc3fb6a43de18ee16a6aa1f6127fb16f7303 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Fri, 20 Apr 2018 11:37:10 +0800
Subject: [PATCH 34/40] follow yapf

---
 fluid/object_detection/eval.py       | 29 +++++++++++++++-------------
 fluid/object_detection/image_util.py |  3 ++-
 fluid/object_detection/infer.py      | 20 ++++++++++++-------
 fluid/object_detection/reader.py     |  7 +++++--
 4 files changed, 36 insertions(+), 23 deletions(-)

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 9e96221e4c..d4bb5208cb 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -68,7 +68,8 @@ def if_exist(var):
         reader.test(data_args, test_list), batch_size=batch_size)
     if 'cocoMAP' in data_args.ap_version:
         feeder = fluid.DataFeeder(
-            place=place, feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
+            place=place,
+            feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
     else:
         feeder = fluid.DataFeeder(
             place=place, feed_list=[image, gt_box, gt_label, difficult])
@@ -80,9 +81,9 @@ def test():
 
             for batch_id, data in enumerate(test_reader()):
                 nmsed_out_v = exe.run(fluid.default_main_program(),
-                                        feed=feeder.feed(data),
-                                        fetch_list=[nmsed_out],
-                                        return_numpy=False)
+                                      feed=feeder.feed(data),
+                                      fetch_list=[nmsed_out],
+                                      return_numpy=False)
                 if batch_id % 20 == 0:
                     print("Batch {0}".format(batch_id))
 
@@ -109,21 +110,21 @@ def test():
                         h = ymax - ymin
                         bbox = [xmin, ymin, w, h]
                         dt_res = {
-                            'image_id' : image_id,
-                            'category_id' : category_id,
-                            'bbox' : bbox,
-                            'score' : score
+                            'image_id': image_id,
+                            'category_id': category_id,
+                            'bbox': bbox,
+                            'score': score
                         }
                         dts_res.append(dt_res)
-            
+
             with open("detection_result.json", 'w') as outfile:
                 json.dump(dts_res, outfile)
             print("start evaluate using coco api")
             from pycocotools.coco import COCO
             from pycocotools.cocoeval import COCOeval
-            cocoGt=COCO(os.path.join(data_args.data_dir, test_list))
-            cocoDt=cocoGt.loadRes("detection_result.json")
-            cocoEval = COCOeval(cocoGt,cocoDt,"bbox")
+            cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
+            cocoDt = cocoGt.loadRes("detection_result.json")
+            cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
             cocoEval.evaluate()
             cocoEval.accumulate()
             cocoEval.summarize()
@@ -150,8 +151,10 @@ def test():
                 if batch_id % 20 == 0:
                     print("Batch {0}, map {1}".format(batch_id, test_map[0]))
             print("Test model {0}, map {1}".format(model_dir, test_map[0]))
+
     test()
 
+
 if __name__ == '__main__':
     args = parser.parse_args()
     print_arguments(args)
@@ -168,7 +171,7 @@ def test():
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        ap_version = args.ap_version,
+        ap_version=args.ap_version,
         toy=0,
         data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
         label_file=label_file,
diff --git a/fluid/object_detection/image_util.py b/fluid/object_detection/image_util.py
index ed801852ea..ca6963b6e1 100644
--- a/fluid/object_detection/image_util.py
+++ b/fluid/object_detection/image_util.py
@@ -4,7 +4,8 @@
 import random
 import math
 
-ImageFile.LOAD_TRUNCATED_IMAGES = True #otherwise IOError raised image file is truncated
+ImageFile.LOAD_TRUNCATED_IMAGES = True  # otherwise an IOError is raised when an image file is truncated
+
 
 class sampler():
     def __init__(self, max_sample, max_trial, min_scale, max_scale,
diff --git a/fluid/object_detection/infer.py b/fluid/object_detection/infer.py
index bedd7aeaeb..237a6b7f12 100644
--- a/fluid/object_detection/infer.py
+++ b/fluid/object_detection/infer.py
@@ -28,6 +28,7 @@
 add_arg('mean_value_R',     float, 127.5,  "mean value for R channel which will be subtracted")  #103.94
 # yapf: enable
 
+
 def infer(args, data_args, image_path, model_dir):
     image_shape = [3, data_args.resize_h, data_args.resize_w]
     if 'coco' in data_args.dataset:
@@ -44,26 +45,30 @@ def infer(args, data_args, image_path, model_dir):
     exe = fluid.Executor(place)
 
     if model_dir:
+
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
+
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     infer_reader = reader.infer(data_args, image_path)
-    feeder = fluid.DataFeeder(
-        place=place, feed_list=[image])
+    feeder = fluid.DataFeeder(place=place, feed_list=[image])
 
     def infer():
         data = infer_reader()
         nmsed_out_v = exe.run(fluid.default_main_program(),
-                                feed=feeder.feed([[data]]),
-                                fetch_list=[nmsed_out],
-                                return_numpy=False)
+                              feed=feeder.feed([[data]]),
+                              fetch_list=[nmsed_out],
+                              return_numpy=False)
         nmsed_out_v = np.array(nmsed_out_v[0])
-        draw_bounding_box_on_image(image_path, nmsed_out_v, args.confs_threshold)
+        draw_bounding_box_on_image(image_path, nmsed_out_v,
+                                   args.confs_threshold)
         for dt in nmsed_out_v:
             category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+
     infer()
 
+
 def draw_bounding_box_on_image(image_path, nms_out, confs_threshold):
     image = Image.open(image_path)
     draw = ImageDraw.Draw(image)
@@ -86,13 +91,14 @@ def draw_bounding_box_on_image(image_path, nms_out, confs_threshold):
     print("image with bbox drawed saved as {}".format(image_name))
     image.save(image_name)
 
+
 if __name__ == '__main__':
     args = parser.parse_args()
     print_arguments(args)
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        ap_version = '',
+        ap_version='',
         toy=0,
         data_dir='',
         label_file='',
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 360fdcdfa9..d0baef0a15 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -25,8 +25,8 @@
 
 
 class Settings(object):
-    def __init__(self, dataset, ap_version, toy, data_dir, label_file, resize_h, resize_w,
-                 mean_value, apply_distort, apply_expand):
+    def __init__(self, dataset, ap_version, toy, data_dir, label_file, resize_h,
+                 resize_w, mean_value, apply_distort, apply_expand):
         self._dataset = dataset
         self._ap_version = ap_version
         self._toy = toy
@@ -61,6 +61,7 @@ def dataset(self):
     @property
     def ap_version(self):
         return self._ap_version
+
     @property
     def toy(self):
         return self._toy
@@ -273,6 +274,7 @@ def reader():
 
     return reader
 
+
 def train(settings, file_list, shuffle=True):
     file_list = os.path.join(settings.data_dir, file_list)
     if 'coco' in settings.dataset:
@@ -320,4 +322,5 @@ def reader():
         img -= settings.img_mean
         img = img * 0.007843
         return img
+
     return reader

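The eval.py hunks above turn each row of the fetched `nmsed_out` tensor, which carries normalized corner coordinates, into a COCO-style pixel-space `[x, y, width, height]` entry before `detection_result.json` is written. A minimal sketch of that per-detection conversion; `to_coco_result` is an illustrative helper name, not a function from the repo:

```python
# Illustrative helper (not from the repo): convert one detection row from the
# normalized [category_id, score, xmin, ymin, xmax, ymax] layout produced by
# fluid.layers.detection_output into a COCO-style result dict.
def to_coco_result(dt_row, image_id, image_width, image_height):
    category_id, score, xmin, ymin, xmax, ymax = dt_row
    # clip to [0, 1], then scale back to pixel coordinates
    xmin = max(min(xmin, 1.0), 0.0) * image_width
    ymin = max(min(ymin, 1.0), 0.0) * image_height
    xmax = max(min(xmax, 1.0), 0.0) * image_width
    ymax = max(min(ymax, 1.0), 0.0) * image_height
    # COCO expects [x, y, width, height] rather than two corners
    return {
        'image_id': int(image_id),
        'category_id': int(category_id),
        'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
        'score': float(score)
    }
```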
From 8e0d8558cf1ed95bd64aea70011d8b7ff5a939a1 Mon Sep 17 00:00:00 2001
From: buxingyuan <sefira32@gmail.com>
Date: Fri, 20 Apr 2018 23:17:55 +0800
Subject: [PATCH 35/40] follow comments

---
 fluid/object_detection/README.md       |   6 +-
 fluid/object_detection/eval.py         | 138 +++-------
 fluid/object_detection/eval_cocoMAP.py | 152 +++++++++++
 fluid/object_detection/infer.py        |  26 +-
 fluid/object_detection/reader.py       | 353 +++++++++++++------------
 fluid/object_detection/train.py        |  34 +--
 6 files changed, 399 insertions(+), 310 deletions(-)
 create mode 100644 fluid/object_detection/eval_cocoMAP.py

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 151e903182..7d10e8d152 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -75,10 +75,10 @@ TBD
 ### Evaluate
 
 ```python
-env CUDA_VISIABLE_DEVICES=0 python eval.py --model_dir='train_pascal_model/90' --test_list='' --ap_version='integral'
-env CUDA_VISIABLE_DEVICES=0 python eval.py --model_dir='train_coco_model/20' --ap_version='cocoMAP'
+env CUDA_VISIBLE_DEVICES=0 python eval.py --dataset='pascalvoc' --model_dir='train_pascal_model/90' --test_list='' --ap_version='11point'
+env CUDA_VISIBLE_DEVICES=0 python eval_cocoMAP.py --dataset='coco2014' --model_dir='train_coco_model/24'
 ```
-You can evaluate your trained model in difference metric like 11point, integral and cocoMAP which is a special mAP metric used in COCO dataset.
+You can evaluate your trained model with different metrics, such as 11point and integral, on both the PASCAL VOC and COCO datasets. Moreover, we provide eval_cocoMAP.py, which uses the COCO-specific mAP metric defined by the [COCO committee](http://cocodataset.org/#detections-eval). To use eval_cocoMAP.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.
 Note we set the default test list to the dataset's test/val list; you can use your own test list by setting the test_list argument.
 
 TBD
diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index d4bb5208cb..56bf96bbfa 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -13,19 +13,19 @@
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 # yapf: disable
-add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017, and pascalvoc.")
+add_arg('dataset',          str,   'pascalvoc',  "coco2014, coco2017, and pascalvoc.")
 add_arg('batch_size',       int,   32,        "Minibatch size.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
 add_arg('data_dir',         str,   '',        "The data root path.")
 add_arg('test_list',        str,   '',        "The testing data lists.")
 add_arg('model_dir',        str,   '',     "The model path.")
-add_arg('nms_threshold',    float, 0.5,    "nms threshold")
-add_arg('ap_version',       str,   'integral',   "integral, 11points, and cocoMAP")
-add_arg('resize_h',         int,   300,    "resize image size")
-add_arg('resize_w',         int,   300,    "resize image size")
-add_arg('mean_value_B',     float, 127.5,  "mean value for B channel which will be subtracted")  #123.68
-add_arg('mean_value_G',     float, 127.5,  "mean value for G channel which will be subtracted")  #116.78
-add_arg('mean_value_R',     float, 127.5,  "mean value for R channel which will be subtracted")  #103.94
+add_arg('nms_threshold',    float, 0.45,   "NMS threshold.")
+add_arg('ap_version',       str,   '11point',   "integral, 11point.")
+add_arg('resize_h',         int,   300,    "The resized image height.")
+add_arg('resize_w',         int,   300,    "The resized image width.")
+add_arg('mean_value_B',     float, 127.5,  "Mean value for B channel which will be subtracted.")  #123.68
+add_arg('mean_value_G',     float, 127.5,  "Mean value for G channel which will be subtracted.")  #116.78
+add_arg('mean_value_R',     float, 127.5,  "Mean value for R channel which will be subtracted.")  #103.94
 # yapf: enable
 
 
@@ -43,10 +43,6 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
         name='gt_label', shape=[1], dtype='int32', lod_level=1)
     difficult = fluid.layers.data(
         name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
-    gt_iscrowd = fluid.layers.data(
-        name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)
-    gt_image_info = fluid.layers.data(
-        name='gt_image_id', shape=[3], dtype='int32', lod_level=1)
 
     locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
     nmsed_out = fluid.layers.detection_output(
@@ -58,99 +54,37 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
     exe = fluid.Executor(place)
 
     if model_dir:
-
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
-
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     test_reader = paddle.batch(
         reader.test(data_args, test_list), batch_size=batch_size)
-    if 'cocoMAP' in data_args.ap_version:
-        feeder = fluid.DataFeeder(
-            place=place,
-            feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
-    else:
-        feeder = fluid.DataFeeder(
-            place=place, feed_list=[image, gt_box, gt_label, difficult])
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=[image, gt_box, gt_label, difficult])
 
     def test():
-        if 'cocoMAP' in data_args.ap_version:
-            dts_res = []
-            import json
-
-            for batch_id, data in enumerate(test_reader()):
-                nmsed_out_v = exe.run(fluid.default_main_program(),
-                                      feed=feeder.feed(data),
-                                      fetch_list=[nmsed_out],
-                                      return_numpy=False)
-                if batch_id % 20 == 0:
-                    print("Batch {0}".format(batch_id))
-
-                lod = nmsed_out_v[0].lod()[0]
-                nmsed_out_v = np.array(nmsed_out_v[0])
-                real_batch_size = min(batch_size, len(data))
-                assert (len(lod) == real_batch_size + 1), \
-                "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
-                k = 0
-                for i in range(real_batch_size):
-                    dt_num_this_img = lod[i + 1] - lod[i]
-                    image_id = int(data[i][4][0])
-                    image_width = int(data[i][4][1])
-                    image_height = int(data[i][4][2])
-                    for j in range(dt_num_this_img):
-                        dt = nmsed_out_v[k]
-                        k = k + 1
-                        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
-                        xmin = max(min(xmin, 1.0), 0.0) * image_width
-                        ymin = max(min(ymin, 1.0), 0.0) * image_height
-                        xmax = max(min(xmax, 1.0), 0.0) * image_width
-                        ymax = max(min(ymax, 1.0), 0.0) * image_height
-                        w = xmax - xmin
-                        h = ymax - ymin
-                        bbox = [xmin, ymin, w, h]
-                        dt_res = {
-                            'image_id': image_id,
-                            'category_id': category_id,
-                            'bbox': bbox,
-                            'score': score
-                        }
-                        dts_res.append(dt_res)
-
-            with open("detection_result.json", 'w') as outfile:
-                json.dump(dts_res, outfile)
-            print("start evaluate using coco api")
-            from pycocotools.coco import COCO
-            from pycocotools.cocoeval import COCOeval
-            cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
-            cocoDt = cocoGt.loadRes("detection_result.json")
-            cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
-            cocoEval.evaluate()
-            cocoEval.accumulate()
-            cocoEval.summarize()
-
-        else:
-            test_program = fluid.default_main_program().clone(for_test=True)
-            with fluid.program_guard(test_program):
-                map_eval = fluid.evaluator.DetectionMAP(
-                    nmsed_out,
-                    gt_label,
-                    gt_box,
-                    difficult,
-                    num_classes,
-                    overlap_threshold=0.5,
-                    evaluate_difficult=False,
-                    ap_version=args.ap_version)
-
-            _, accum_map = map_eval.get_map_var()
-            map_eval.reset(exe)
-            for batch_id, data in enumerate(test_reader()):
-                test_map = exe.run(test_program,
-                                   feed=feeder.feed(data),
-                                   fetch_list=[accum_map])
-                if batch_id % 20 == 0:
-                    print("Batch {0}, map {1}".format(batch_id, test_map[0]))
-            print("Test model {0}, map {1}".format(model_dir, test_map[0]))
+        test_program = fluid.default_main_program().clone(for_test=True)
+        with fluid.program_guard(test_program):
+            map_eval = fluid.evaluator.DetectionMAP(
+                nmsed_out,
+                gt_label,
+                gt_box,
+                difficult,
+                num_classes,
+                overlap_threshold=0.5,
+                evaluate_difficult=False,
+                ap_version=args.ap_version)
+
+        _, accum_map = map_eval.get_map_var()
+        map_eval.reset(exe)
+        for batch_id, data in enumerate(test_reader()):
+            test_map = exe.run(test_program,
+                               feed=feeder.feed(data),
+                               fetch_list=[accum_map])
+            if batch_id % 20 == 0:
+                print("Batch {0}, map {1}".format(batch_id, test_map[0]))
+        print("Test model {0}, map {1}".format(model_dir, test_map[0]))
 
     test()
 
@@ -171,15 +105,15 @@ def test():
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        ap_version=args.ap_version,
-        toy=0,
         data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
         label_file=label_file,
-        apply_distort=False,
-        apply_expand=False,
         resize_h=args.resize_h,
         resize_w=args.resize_w,
-        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
+        apply_distort=False,
+        apply_expand=False,
+        ap_version=args.ap_version,
+        toy=0)
     eval(
         args,
         data_args=data_args,
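eval.py keeps the `DetectionMAP` evaluator for PASCAL VOC and exposes the metric through `--ap_version`, which accepts `11point` or `integral`. The sketch below is not the repo's implementation; it only illustrates, on a made-up precision-recall curve, how the two settings differ: `11point` averages interpolated precision at recalls 0.0, 0.1, ..., 1.0, while `integral` approximates the area under the curve.

```python
# Illustrative only (not the repo's DetectionMAP implementation): the two
# ap_version settings evaluated on a made-up precision-recall curve.
import numpy as np

recalls = np.array([0.1, 0.4, 0.7, 1.0])
precisions = np.array([0.9, 0.8, 0.6, 0.3])

def ap_11point(recalls, precisions):
    # average of interpolated precision at recalls 0.0, 0.1, ..., 1.0
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        mask = recalls >= t
        ap += (precisions[mask].max() if mask.any() else 0.0) / 11.0
    return ap

def ap_integral(recalls, precisions):
    # rectangle approximation of the area under the precision-recall curve
    prev_r, ap = 0.0, 0.0
    for r, p in zip(recalls, precisions):
        ap += (r - prev_r) * p
        prev_r = r
    return ap

print(ap_11point(recalls, precisions))   # ~0.63 on this toy curve
print(ap_integral(recalls, precisions))  # 0.60 on this toy curve
```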
diff --git a/fluid/object_detection/eval_cocoMAP.py b/fluid/object_detection/eval_cocoMAP.py
new file mode 100644
index 0000000000..a6ae62b2fe
--- /dev/null
+++ b/fluid/object_detection/eval_cocoMAP.py
@@ -0,0 +1,152 @@
+import os
+import time
+import numpy as np
+import argparse
+import functools
+
+import paddle
+import paddle.fluid as fluid
+import reader
+from mobilenet_ssd import mobile_net
+from utility import add_arguments, print_arguments
+
+# A special mAP metric for the COCO dataset, which averages AP over different IoU thresholds.
+# To use this eval_cocoMAP.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.
+import json
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+parser = argparse.ArgumentParser(description=__doc__)
+add_arg = functools.partial(add_arguments, argparser=parser)
+# yapf: disable
+add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017.")
+add_arg('batch_size',       int,   32,        "Minibatch size.")
+add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
+add_arg('data_dir',         str,   '',        "The data root path.")
+add_arg('test_list',        str,   '',        "The testing data lists.")
+add_arg('model_dir',        str,   '',     "The model path.")
+add_arg('nms_threshold',    float, 0.5,    "NMS threshold.")
+add_arg('ap_version',       str,   'cocoMAP',   "cocoMAP.")
+add_arg('resize_h',         int,   300,    "The resized image height.")
+add_arg('resize_w',         int,   300,    "The resized image width.")
+add_arg('mean_value_B',     float, 127.5,  "Mean value for B channel which will be subtracted.")  #123.68
+add_arg('mean_value_G',     float, 127.5,  "Mean value for G channel which will be subtracted.")  #116.78
+add_arg('mean_value_R',     float, 127.5,  "Mean value for R channel which will be subtracted.")  #103.94
+# yapf: enable
+
+
+def eval(args, data_args, test_list, batch_size, model_dir=None):
+    image_shape = [3, data_args.resize_h, data_args.resize_w]
+    num_classes = 91
+
+    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
+    gt_box = fluid.layers.data(
+        name='gt_box', shape=[4], dtype='float32', lod_level=1)
+    gt_label = fluid.layers.data(
+        name='gt_label', shape=[1], dtype='int32', lod_level=1)
+    gt_iscrowd = fluid.layers.data(
+        name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)
+    gt_image_info = fluid.layers.data(
+        name='gt_image_id', shape=[3], dtype='int32', lod_level=1)
+
+    locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
+    nmsed_out = fluid.layers.detection_output(
+        locs, confs, box, box_var, nms_threshold=args.nms_threshold)
+    loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
+    loss = fluid.layers.reduce_sum(loss)
+
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+
+    if model_dir:
+        def if_exist(var):
+            return os.path.exists(os.path.join(model_dir, var.name))
+        fluid.io.load_vars(exe, model_dir, predicate=if_exist)
+
+    test_reader = paddle.batch(
+        reader.test(data_args, test_list), batch_size=batch_size)
+    feeder = fluid.DataFeeder(
+        place=place,
+        feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
+
+    def test():
+            dts_res = []
+
+            for batch_id, data in enumerate(test_reader()):
+                nmsed_out_v = exe.run(fluid.default_main_program(),
+                                      feed=feeder.feed(data),
+                                      fetch_list=[nmsed_out],
+                                      return_numpy=False)
+                if batch_id % 20 == 0:
+                    print("Batch {0}".format(batch_id))
+
+                lod = nmsed_out_v[0].lod()[0]
+                nmsed_out_v = np.array(nmsed_out_v[0])
+                real_batch_size = min(batch_size, len(data))
+                assert (len(lod) == real_batch_size + 1), \
+                "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
+                k = 0
+                for i in range(real_batch_size):
+                    dt_num_this_img = lod[i + 1] - lod[i]
+                    image_id = int(data[i][4][0])
+                    image_width = int(data[i][4][1])
+                    image_height = int(data[i][4][2])
+                    for j in range(dt_num_this_img):
+                        dt = nmsed_out_v[k]
+                        k = k + 1
+                        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+                        xmin = max(min(xmin, 1.0), 0.0) * image_width
+                        ymin = max(min(ymin, 1.0), 0.0) * image_height
+                        xmax = max(min(xmax, 1.0), 0.0) * image_width
+                        ymax = max(min(ymax, 1.0), 0.0) * image_height
+                        w = xmax - xmin
+                        h = ymax - ymin
+                        bbox = [xmin, ymin, w, h]
+                        dt_res = {
+                            'image_id': image_id,
+                            'category_id': category_id,
+                            'bbox': bbox,
+                            'score': score
+                        }
+                        dts_res.append(dt_res)
+
+            with open("detection_result.json", 'w') as outfile:
+                json.dump(dts_res, outfile)
+            print("start evaluate using coco api")
+            cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
+            cocoDt = cocoGt.loadRes("detection_result.json")
+            cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
+            cocoEval.evaluate()
+            cocoEval.accumulate()
+            cocoEval.summarize()
+
+    test()
+
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    print_arguments(args)
+
+    data_dir = './data/coco'
+    if '2014' in args.dataset:
+        test_list = 'annotations/instances_minival2014.json'
+    elif '2017' in args.dataset:
+        test_list = 'annotations/instances_val2017.json'
+
+    data_args = reader.Settings(
+        dataset=args.dataset,
+        data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
+        label_file=label_file,
+        resize_h=args.resize_h,
+        resize_w=args.resize_w,
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
+        apply_distort=False,
+        apply_expand=False,
+        ap_version=args.ap_version,
+        toy=0)
+    eval(
+        args,
+        data_args=data_args,
+        test_list=args.test_list if len(args.test_list) > 0 else test_list,
+        batch_size=args.batch_size,
+        model_dir=args.model_dir)
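After `eval_cocoMAP.py` writes `detection_result.json`, it hands the file to the COCO evaluation API. The calls below mirror the ones used in the script; the annotation path shown is just the script's `coco2014` default and may need to be adjusted:

```python
# Sketch of the final evaluation step, assuming cocoapi is installed and
# detection_result.json holds a list of {image_id, category_id, bbox, score}
# dicts as produced above.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

cocoGt = COCO('data/coco/annotations/instances_minival2014.json')  # ground truth
cocoDt = cocoGt.loadRes('detection_result.json')                    # detections
cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
cocoEval.evaluate()    # match detections to ground truth per image and category
cocoEval.accumulate()  # build precision/recall over IoU thresholds
cocoEval.summarize()   # print AP/AR averaged over IoU 0.50:0.95, etc.
```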
diff --git a/fluid/object_detection/infer.py b/fluid/object_detection/infer.py
index 237a6b7f12..b27aa5e688 100644
--- a/fluid/object_detection/infer.py
+++ b/fluid/object_detection/infer.py
@@ -15,17 +15,17 @@
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 # yapf: disable
-add_arg('dataset',          str,   'coco',    "coco and pascalvoc.")
+add_arg('dataset',          str,   'pascalvoc',    "coco and pascalvoc.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
 add_arg('image_path',       str,   '',        "The image used to inference and visualize.")
 add_arg('model_dir',        str,   '',     "The model path.")
-add_arg('nms_threshold',    float, 0.5,    "nms threshold")
-add_arg('confs_threshold',  float, 0.2,    "confidence threshold for draw bbox")
-add_arg('resize_h',         int,   300,    "resize image size")
-add_arg('resize_w',         int,   300,    "resize image size")
-add_arg('mean_value_B',     float, 127.5,  "mean value for B channel which will be subtracted")  #123.68
-add_arg('mean_value_G',     float, 127.5,  "mean value for G channel which will be subtracted")  #116.78
-add_arg('mean_value_R',     float, 127.5,  "mean value for R channel which will be subtracted")  #103.94
+add_arg('nms_threshold',    float, 0.45,   "NMS threshold.")
+add_arg('confs_threshold',  float, 0.2,    "Confidence threshold to draw bbox.")
+add_arg('resize_h',         int,   300,    "The resized image height.")
+add_arg('resize_w',         int,   300,    "The resized image width.")
+add_arg('mean_value_B',     float, 127.5,  "Mean value for B channel which will be subtracted.")  #123.68
+add_arg('mean_value_G',     float, 127.5,  "Mean value for G channel which will be subtracted.")  #116.78
+add_arg('mean_value_R',     float, 127.5,  "Mean value for R channel which will be subtracted.")  #103.94
 # yapf: enable
 
 
@@ -98,15 +98,15 @@ def draw_bounding_box_on_image(image_path, nms_out, confs_threshold):
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        ap_version='',
-        toy=0,
         data_dir='',
         label_file='',
-        apply_distort=False,
-        apply_expand=False,
         resize_h=args.resize_h,
         resize_w=args.resize_w,
-        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
+        apply_distort=False,
+        apply_expand=False,
+        ap_version='',
+        toy=0)
     infer(
         args,
         data_args=data_args,
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index d0baef0a15..686b23dbd7 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -25,8 +25,17 @@
 
 
 class Settings(object):
-    def __init__(self, dataset, ap_version, toy, data_dir, label_file, resize_h,
-                 resize_w, mean_value, apply_distort, apply_expand):
+    def __init__(self,
+                 dataset=None,
+                 data_dir=None,
+                 label_file=None,
+                 resize_h=300,
+                 resize_w=300,
+                 mean_value=[127.5, 127.5, 127.5],
+                 apply_distort=True,
+                 apply_expand=True,
+                 ap_version='11point',
+                 toy=0): 
         self._dataset = dataset
         self._ap_version = ap_version
         self._toy = toy
@@ -99,178 +108,172 @@ def img_mean(self):
         return self._img_mean
 
 
-def _reader_creator(settings, file_list, mode, shuffle):
+def preprocess(img, bbox_labels, mode, settings):
+    img_width, img_height = img.size
+    sampled_labels = bbox_labels
+    if mode == 'train':
+        if settings._apply_distort:
+            img = image_util.distort_image(img, settings)
+        if settings._apply_expand:
+            img, bbox_labels, img_width, img_height = image_util.expand_image(
+                img, bbox_labels, img_width, img_height, settings)
+        # sampling
+        batch_sampler = []
+        # hard-code here
+        batch_sampler.append(
+            image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
+        batch_sampler.append(
+            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))
+        batch_sampler.append(
+            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))
+        batch_sampler.append(
+            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))
+        batch_sampler.append(
+            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))
+        batch_sampler.append(
+            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))
+        batch_sampler.append(
+            image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
+        sampled_bbox = image_util.generate_batch_samples(batch_sampler,
+                                                         bbox_labels)
+
+        img = np.array(img)
+        if len(sampled_bbox) > 0:
+            idx = int(random.uniform(0, len(sampled_bbox)))
+            img, sampled_labels = image_util.crop_image(
+                img, bbox_labels, sampled_bbox[idx], img_width, img_height)
+
+        img = Image.fromarray(img)
+    img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)
+    img = np.array(img)
+
+    if mode == 'train':
+        mirror = int(random.uniform(0, 2))
+        if mirror == 1:
+            img = img[:, ::-1, :]
+            for i in xrange(len(sampled_labels)):
+                tmp = sampled_labels[i][1]
+                sampled_labels[i][1] = 1 - sampled_labels[i][3]
+                sampled_labels[i][3] = 1 - tmp
+    # HWC to CHW
+    if len(img.shape) == 3:
+        img = np.swapaxes(img, 1, 2)
+        img = np.swapaxes(img, 1, 0)
+    # RGB to BGR
+    img = img[[2, 1, 0], :, :]
+    img = img.astype('float32')
+    img -= settings.img_mean
+    img = img * 0.007843
+    return img, sampled_labels
+
+
+def coco(settings, file_list, mode, shuffle):
+    # cocoapi
+    from pycocotools.coco import COCO
+    from pycocotools.cocoeval import COCOeval
+
+    coco = COCO(file_list)
+    image_ids = coco.getImgIds()
+    images = coco.loadImgs(image_ids)
+    category_ids = coco.getCatIds()
+    category_names = [item['name'] for item in coco.loadCats(category_ids)]
+
+    if not settings.toy == 0:
+        images = images[:settings.toy] if len(images) > settings.toy else images
+    print("{} on {} with {} images".format(mode, settings.dataset, len(images)))
+
     def reader():
-        if 'coco' in settings.dataset:
-            # cocoapi 
-            from pycocotools.coco import COCO
-            from pycocotools.cocoeval import COCOeval
-
-            coco = COCO(file_list)
-            image_ids = coco.getImgIds()
-            images = coco.loadImgs(image_ids)
-            category_ids = coco.getCatIds()
-            category_names = [
-                item['name'] for item in coco.loadCats(category_ids)
-            ]
-        elif 'pascalvoc' in settings.dataset:
-            flist = open(file_list)
-            images = [line.strip() for line in flist]
-
-        if not settings.toy == 0:
-            images = images[:settings.toy] if len(
-                images) > settings.toy else images
-        print("{} on {} with {} images".format(mode, settings.dataset,
-                                               len(images)))
-
-        if shuffle:
+        if mode == 'train' and shuffle:
             random.shuffle(images)
-
         for image in images:
-            if 'coco' in settings.dataset:
-                image_name = image['file_name']
-                image_path = os.path.join(settings.data_dir, image_name)
-            elif 'pascalvoc' in settings.dataset:
-                if mode == 'train' or mode == 'test':
-                    image_path, label_path = image.split()
-                    image_path = os.path.join(settings.data_dir, image_path)
-                    label_path = os.path.join(settings.data_dir, label_path)
-                elif mode == 'infer':
-                    image_path = os.path.join(settings.data_dir, image)
-
-            img = Image.open(image_path)
-            if img.mode == 'L':
-                img = img.convert('RGB')
-            img_width, img_height = img.size
+            image_name = image['file_name']
+            image_path = os.path.join(settings.data_dir, image_name)
+
+            im = Image.open(image_path)
+            if im.mode == 'L':
+                im = im.convert('RGB')
+            im_width, im_height = im.size
             img_id = image['id']
 
-            if mode == 'train' or mode == 'test':
-                if 'coco' in settings.dataset:
-                    # layout: label | xmin | ymin | xmax | ymax | iscrowd | area | image_id | category_id
-                    bbox_labels = []
-                    annIds = coco.getAnnIds(imgIds=image['id'])
-                    anns = coco.loadAnns(annIds)
-                    for ann in anns:
-                        bbox_sample = []
-                        # start from 1, leave 0 to background
-                        bbox_sample.append(float(ann['category_id']))
-                        #float(category_ids.index(ann['category_id'])) + 1)
-                        bbox = ann['bbox']
-                        xmin, ymin, w, h = bbox
-                        xmax = xmin + w
-                        ymax = ymin + h
-                        bbox_sample.append(float(xmin) / img_width)
-                        bbox_sample.append(float(ymin) / img_height)
-                        bbox_sample.append(float(xmax) / img_width)
-                        bbox_sample.append(float(ymax) / img_height)
-                        bbox_sample.append(float(ann['iscrowd']))
-                        #bbox_sample.append(ann['area'])
-                        #bbox_sample.append(ann['image_id'])
-                        #bbox_sample.append(ann['category_id'])
-                        #bbox_sample.append(ann['id'])
-                        #bbox_sample.append(ann['bbox'])
-                        #bbox_sample.append(ann['segmentation'])
-                        bbox_labels.append(bbox_sample)
-                elif 'pascalvoc' in settings.dataset:
-                    # layout: label | xmin | ymin | xmax | ymax | difficult
-                    bbox_labels = []
-                    root = xml.etree.ElementTree.parse(label_path).getroot()
-                    for object in root.findall('object'):
-                        bbox_sample = []
-                        # start from 1
-                        bbox_sample.append(
-                            float(
-                                settings.label_list.index(
-                                    object.find('name').text)))
-                        bbox = object.find('bndbox')
-                        difficult = float(object.find('difficult').text)
-                        bbox_sample.append(
-                            float(bbox.find('xmin').text) / img_width)
-                        bbox_sample.append(
-                            float(bbox.find('ymin').text) / img_height)
-                        bbox_sample.append(
-                            float(bbox.find('xmax').text) / img_width)
-                        bbox_sample.append(
-                            float(bbox.find('ymax').text) / img_height)
-                        bbox_sample.append(difficult)
-                        bbox_labels.append(bbox_sample)
-
-                sample_labels = bbox_labels
-                if mode == 'train':
-                    if settings._apply_distort:
-                        img = image_util.distort_image(img, settings)
-                    if settings._apply_expand:
-                        img, bbox_labels, img_width, img_height = image_util.expand_image(
-                            img, bbox_labels, img_width, img_height, settings)
-                    batch_sampler = []
-                    # hard-code here
-                    batch_sampler.append(
-                        image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))
-                    batch_sampler.append(
-                        image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))
-                    """ random crop """
-                    sampled_bbox = image_util.generate_batch_samples(
-                        batch_sampler, bbox_labels)
-
-                    img = np.array(img)
-                    if len(sampled_bbox) > 0:
-                        idx = int(random.uniform(0, len(sampled_bbox)))
-                        img, sample_labels = image_util.crop_image(
-                            img, bbox_labels, sampled_bbox[idx], img_width,
-                            img_height)
-
-                    img = Image.fromarray(img)
-            img = img.resize((settings.resize_w, settings.resize_h),
-                             Image.ANTIALIAS)
-            img = np.array(img)
-
-            if mode == 'train':
-                mirror = int(random.uniform(0, 2))
-                if mirror == 1:
-                    img = img[:, ::-1, :]
-                    for i in xrange(len(sample_labels)):
-                        tmp = sample_labels[i][1]
-                        sample_labels[i][1] = 1 - sample_labels[i][3]
-                        sample_labels[i][3] = 1 - tmp
-
-            # HWC to CHW
-            if len(img.shape) == 3:
-                img = np.swapaxes(img, 1, 2)
-                img = np.swapaxes(img, 1, 0)
-            # RBG to BGR
-            img = img[[2, 1, 0], :, :]
-            img = img.astype('float32')
-            img -= settings.img_mean
-            img = img.flatten()
-            img = img * 0.007843
-
-            if mode == 'train' or mode == 'test':
-                sample_labels = np.array(sample_labels)
-                if len(sample_labels) == 0:
-                    continue
-                if 'cocoMAP' in settings.ap_version:
-                    yield img.astype('float32'), \
-                        sample_labels[:, 1:5], \
-                        sample_labels[:, 0].astype('int32'), \
-                        sample_labels[:, 5].astype('int32'), \
-                        [img_id, img_width, img_height]
-                else:
-                    yield img.astype(
-                        'float32'
-                    ), sample_labels[:, 1:5], sample_labels[:, 0].astype(
-                        'int32'), sample_labels[:, -1].astype('int32')
-            elif mode == 'infer':
-                yield img.astype('float32')
+            # layout: category_id | xmin | ymin | xmax | ymax | iscrowd
+            bbox_labels = []
+            annIds = coco.getAnnIds(imgIds=image['id'])
+            anns = coco.loadAnns(annIds)
+            for ann in anns:
+                bbox_sample = []
+                # start from 1, leave 0 for the background class
+                bbox_sample.append(float(ann['category_id']))
+                #float(category_ids.index(ann['category_id'])) + 1)
+                bbox = ann['bbox']
+                xmin, ymin, w, h = bbox
+                xmax = xmin + w
+                ymax = ymin + h
+                bbox_sample.append(float(xmin) / im_width)
+                bbox_sample.append(float(ymin) / im_height)
+                bbox_sample.append(float(xmax) / im_width)
+                bbox_sample.append(float(ymax) / im_height)
+                bbox_sample.append(float(ann['iscrowd']))
+                bbox_labels.append(bbox_sample)
+            im, sample_labels = preprocess(im, bbox_labels, mode, settings)
+            sample_labels = np.array(sample_labels)
+            if len(sample_labels) == 0: continue
+            im = im.astype('float32')
+            boxes = sample_labels[:, 1:5]
+            lbls = sample_labels[:, 0].astype('int32')
+            iscrowd = sample_labels[:, -1].astype('int32')
+            if 'cocoMAP' in settings.ap_version:
+                yield im, boxes, lbls, iscrowd, \
+                    [img_id, img_width, img_height]
+            else:
+                yield im, boxes, lbls, iscrowd
+
+    return reader
+
+
+def pascalvoc(settings, file_list, mode, shuffle):
+    flist = open(file_list)
+    images = [line.strip() for line in flist]
+    if not settings.toy == 0:
+        images = images[:settings.toy] if len(images) > settings.toy else images
+    print("{} on {} with {} images".format(mode, settings.dataset, len(images)))
+
+    def reader():
+        if mode == 'train' and shuffle:
+            random.shuffle(images)
+        for image in images:
+            image_path, label_path = image.split()
+            image_path = os.path.join(settings.data_dir, image_path)
+            label_path = os.path.join(settings.data_dir, label_path)
+
+            im = Image.open(image_path)
+            if im.mode == 'L':
+                im = im.convert('RGB')
+            im_width, im_height = im.size
+
+            # layout: label | xmin | ymin | xmax | ymax | difficult
+            bbox_labels = []
+            root = xml.etree.ElementTree.parse(label_path).getroot()
+            for object in root.findall('object'):
+                bbox_sample = []
+                # start from 1
+                bbox_sample.append(
+                    float(settings.label_list.index(object.find('name').text)))
+                bbox = object.find('bndbox')
+                difficult = float(object.find('difficult').text)
+                bbox_sample.append(float(bbox.find('xmin').text) / im_width)
+                bbox_sample.append(float(bbox.find('ymin').text) / im_height)
+                bbox_sample.append(float(bbox.find('xmax').text) / im_width)
+                bbox_sample.append(float(bbox.find('ymax').text) / im_height)
+                bbox_sample.append(difficult)
+                bbox_labels.append(bbox_sample)
+            im, sample_labels = preprocess(im, bbox_labels, mode, settings)
+            sample_labels = np.array(sample_labels)
+            if len(sample_labels) == 0: continue
+            im = im.astype('float32')
+            boxes = sample_labels[:, 1:5]
+            lbls = sample_labels[:, 0].astype('int32')
+            difficults = sample_labels[:, -1].astype('int32')
+            yield im, boxes, lbls, difficults
 
     return reader
 
@@ -284,9 +287,9 @@ def train(settings, file_list, shuffle=True):
         elif '2017' in file_list:
             sub_dir = "train2017"
         train_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
-        return _reader_creator(train_settings, file_list, 'train', shuffle)
-    elif 'pascalvoc' in settings.dataset:
-        return _reader_creator(settings, file_list, 'train', shuffle)
+        return coco(train_settings, file_list, 'train', shuffle)
+    else:
+        return pascalvoc(settings, file_list, 'train', shuffle)
 
 
 def test(settings, file_list):
@@ -298,9 +301,9 @@ def test(settings, file_list):
         elif '2017' in file_list:
             sub_dir = "val2017"
         test_settings.data_dir = os.path.join(settings.data_dir, sub_dir)
-        return _reader_creator(test_settings, file_list, 'test', False)
-    elif 'pascalvoc' in settings.dataset:
-        return _reader_creator(settings, file_list, 'test', False)
+        return coco(test_settings, file_list, 'test', False)
+    else:
+        return pascalvoc(settings, file_list, 'test', False)
 
 
 def infer(settings, image_path):
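One detail worth calling out in the new `coco()` reader above: COCO annotations store boxes as absolute `[xmin, ymin, width, height]`, while the SSD layers consume corner coordinates normalized to the image size. A small sketch of that conversion with made-up numbers:

```python
# Illustrative numbers only: COCO stores boxes as absolute [xmin, ymin, w, h],
# and the coco() reader converts them to corner form normalized by image size.
ann_bbox = [40.0, 20.0, 100.0, 50.0]    # a COCO-style annotation bbox
im_width, im_height = 200, 100          # the corresponding image size

xmin, ymin, w, h = ann_bbox
xmax, ymax = xmin + w, ymin + h
normalized = [xmin / im_width, ymin / im_height,
              xmax / im_width, ymax / im_height]
# -> [0.2, 0.2, 0.7, 0.7], the layout fed to ssd_loss / detection_output
```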
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index e17a1c2bf9..7ccce8405f 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -18,20 +18,20 @@
 add_arg('batch_size',       int,   32,        "Minibatch size.")
 add_arg('num_passes',       int,   25,        "Epoch number.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
-add_arg('dataset',          str,   'coco2014',  "coco2014, coco2017, and pascalvoc.")
+add_arg('dataset',          str,   'pascalvoc', "coco2014, coco2017, and pascalvoc.")
 add_arg('model_save_dir',   str,   'model',     "The path to save model.")
 add_arg('pretrained_model', str,   'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
-add_arg('apply_distort',    bool,  True,   "Whether apply distort")
-add_arg('apply_expand',     bool,  False,  "Whether appley expand")
-add_arg('nms_threshold',    float, 0.5,    "nms threshold")
-add_arg('ap_version',       str,   'integral',   "integral, 11points")
-add_arg('resize_h',         int,   300,    "resize image size")
-add_arg('resize_w',         int,   300,    "resize image size")
-add_arg('mean_value_B',     float, 127.5, "mean value which will be subtracted")  #123.68
-add_arg('mean_value_G',     float, 127.5, "mean value which will be subtracted")  #116.78
-add_arg('mean_value_R',     float, 127.5, "mean value which will be subtracted")  #103.94
-add_arg('is_toy',           int,   0, "Toy for quick debug, 0 means using all data, while n means using only n sample")
-# yapf: disable
+add_arg('apply_distort',    bool,  True,   "Whether to apply distort.")
+add_arg('apply_expand',     bool,  False,  "Whether to apply expand.")
+add_arg('nms_threshold',    float, 0.45,   "NMS threshold.")
+add_arg('ap_version',       str,   'integral',   "integral, 11point.")
+add_arg('resize_h',         int,   300,    "The resized image height.")
+add_arg('resize_w',         int,   300,    "The resized image width.")
+add_arg('mean_value_B',     float, 127.5,  "Mean value for B channel which will be subtracted.")  #123.68
+add_arg('mean_value_G',     float, 127.5,  "Mean value for G channel which will be subtracted.")  #116.78
+add_arg('mean_value_R',     float, 127.5,  "Mean value for R channel which will be subtracted.")  #103.94
+add_arg('is_toy',           int,   0, "Toy for quick debug, 0 means using all data, while n means using only n samples.")
+# yapf: enable
 
 def parallel_do(args,
                 train_file_list,
@@ -319,15 +319,15 @@ def test(pass_id, best_map):
 
     data_args = reader.Settings(
         dataset=args.dataset,
-        ap_version = args.ap_version,
-        toy=args.is_toy,
         data_dir=data_dir,
         label_file=label_file,
-        apply_distort=args.apply_distort,
-        apply_expand=args.apply_expand,
         resize_h=args.resize_h,
         resize_w=args.resize_w,
-        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
+        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
+        apply_distort=args.apply_distort,
+        apply_expand=args.apply_expand,
+        ap_version = args.ap_version,
+        toy=args.is_toy)
     method = parallel_exe
     method(
         args,

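With this patch, `reader.Settings` takes keyword arguments with defaults, so the call sites in `train.py`, `eval.py`, and `infer.py` only override what they need. A hedged usage sketch; the paths, label file, and list file below are placeholders, not values taken from this patch:

```python
# Usage sketch of the refactored Settings; placeholder paths and file names.
import reader

data_args = reader.Settings(
    dataset='pascalvoc',
    data_dir='data/pascalvoc',           # placeholder data root
    label_file='label_list',             # placeholder label-list file
    resize_h=300,
    resize_w=300,
    mean_value=[127.5, 127.5, 127.5],
    apply_distort=True,                  # enable color distortion for training
    apply_expand=False,
    ap_version='11point',
    toy=0)                               # 0 means use the full dataset
train_reader = reader.train(data_args, 'trainval.txt')  # placeholder list file
```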
From 88bc588bfdc77b4858fae8e2ce828484022688cc Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Sat, 21 Apr 2018 12:02:15 +0800
Subject: [PATCH 36/40] fix bug after separate eval and eval_cocoMAP

---
 fluid/object_detection/eval_cocoMAP.py | 2 +-
 fluid/object_detection/reader.py       | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/fluid/object_detection/eval_cocoMAP.py b/fluid/object_detection/eval_cocoMAP.py
index a6ae62b2fe..2b5f180d54 100644
--- a/fluid/object_detection/eval_cocoMAP.py
+++ b/fluid/object_detection/eval_cocoMAP.py
@@ -136,7 +136,7 @@ def test():
     data_args = reader.Settings(
         dataset=args.dataset,
         data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
-        label_file=label_file,
+        label_file='',
         resize_h=args.resize_h,
         resize_w=args.resize_w,
         mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 686b23dbd7..67af57f1a7 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -193,7 +193,7 @@ def reader():
             if im.mode == 'L':
                 im = im.convert('RGB')
             im_width, im_height = im.size
-            img_id = image['id']
+            im_id = image['id']
 
             # layout: category_id | xmin | ymin | xmax | ymax | iscrowd
             bbox_labels = []
@@ -223,7 +223,7 @@ def reader():
             iscrowd = sample_labels[:, -1].astype('int32')
             if 'cocoMAP' in settings.ap_version:
                 yield im, boxes, lbls, iscrowd, \
-                    [img_id, img_width, img_height]
+                    [im_id, im_width, im_height]
             else:
                 yield im, boxes, lbls, iscrowd
 

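The cocoMAP loop above relies on the LoD (Level-of-Detail) offsets of the fetched `nmsed_out` tensor to split a flat list of detections back into per-image groups, which is also what the `len(lod) == real_batch_size + 1` assertion checks. A small worked example with illustrative offsets:

```python
# Illustrative only: how the LoD offsets of nmsed_out are read in the loop above.
# For a batch of 3 images, the offsets might look like this:
lod = [0, 4, 4, 9]   # detections for image i occupy rows lod[i]:lod[i + 1]

for i in range(len(lod) - 1):
    dt_num_this_img = lod[i + 1] - lod[i]
    print("image {} has {} detections".format(i, dt_num_this_img))
# image 0 has 4 detections, image 1 has 0, image 2 has 5
```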
From a08b8e5e9447e67d61728f86991674115a9e9e27 Mon Sep 17 00:00:00 2001
From: buxingyuan <buxingyuan@baidu.com>
Date: Sat, 21 Apr 2018 12:07:31 +0800
Subject: [PATCH 37/40] follow yapf

---
 fluid/object_detection/eval.py         |   2 +
 fluid/object_detection/eval_cocoMAP.py | 100 +++++++++++++------------
 fluid/object_detection/reader.py       |   2 +-
 3 files changed, 54 insertions(+), 50 deletions(-)

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 56bf96bbfa..66716c2d46 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -54,8 +54,10 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
     exe = fluid.Executor(place)
 
     if model_dir:
+
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
+
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     test_reader = paddle.batch(
diff --git a/fluid/object_detection/eval_cocoMAP.py b/fluid/object_detection/eval_cocoMAP.py
index 2b5f180d54..03eaccf5a3 100644
--- a/fluid/object_detection/eval_cocoMAP.py
+++ b/fluid/object_detection/eval_cocoMAP.py
@@ -59,8 +59,10 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
     exe = fluid.Executor(place)
 
     if model_dir:
+
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
+
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     test_reader = paddle.batch(
@@ -70,55 +72,55 @@ def if_exist(var):
         feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
 
     def test():
-            dts_res = []
-
-            for batch_id, data in enumerate(test_reader()):
-                nmsed_out_v = exe.run(fluid.default_main_program(),
-                                      feed=feeder.feed(data),
-                                      fetch_list=[nmsed_out],
-                                      return_numpy=False)
-                if batch_id % 20 == 0:
-                    print("Batch {0}".format(batch_id))
-
-                lod = nmsed_out_v[0].lod()[0]
-                nmsed_out_v = np.array(nmsed_out_v[0])
-                real_batch_size = min(batch_size, len(data))
-                assert (len(lod) == real_batch_size + 1), \
-                "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
-                k = 0
-                for i in range(real_batch_size):
-                    dt_num_this_img = lod[i + 1] - lod[i]
-                    image_id = int(data[i][4][0])
-                    image_width = int(data[i][4][1])
-                    image_height = int(data[i][4][2])
-                    for j in range(dt_num_this_img):
-                        dt = nmsed_out_v[k]
-                        k = k + 1
-                        category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
-                        xmin = max(min(xmin, 1.0), 0.0) * image_width
-                        ymin = max(min(ymin, 1.0), 0.0) * image_height
-                        xmax = max(min(xmax, 1.0), 0.0) * image_width
-                        ymax = max(min(ymax, 1.0), 0.0) * image_height
-                        w = xmax - xmin
-                        h = ymax - ymin
-                        bbox = [xmin, ymin, w, h]
-                        dt_res = {
-                            'image_id': image_id,
-                            'category_id': category_id,
-                            'bbox': bbox,
-                            'score': score
-                        }
-                        dts_res.append(dt_res)
-
-            with open("detection_result.json", 'w') as outfile:
-                json.dump(dts_res, outfile)
-            print("start evaluate using coco api")
-            cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
-            cocoDt = cocoGt.loadRes("detection_result.json")
-            cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
-            cocoEval.evaluate()
-            cocoEval.accumulate()
-            cocoEval.summarize()
+        dts_res = []
+
+        for batch_id, data in enumerate(test_reader()):
+            nmsed_out_v = exe.run(fluid.default_main_program(),
+                                  feed=feeder.feed(data),
+                                  fetch_list=[nmsed_out],
+                                  return_numpy=False)
+            if batch_id % 20 == 0:
+                print("Batch {0}".format(batch_id))
+
+            lod = nmsed_out_v[0].lod()[0]
+            nmsed_out_v = np.array(nmsed_out_v[0])
+            real_batch_size = min(batch_size, len(data))
+            assert (len(lod) == real_batch_size + 1), \
+            "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
+            k = 0
+            for i in range(real_batch_size):
+                dt_num_this_img = lod[i + 1] - lod[i]
+                image_id = int(data[i][4][0])
+                image_width = int(data[i][4][1])
+                image_height = int(data[i][4][2])
+                for j in range(dt_num_this_img):
+                    dt = nmsed_out_v[k]
+                    k = k + 1
+                    category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+                    xmin = max(min(xmin, 1.0), 0.0) * image_width
+                    ymin = max(min(ymin, 1.0), 0.0) * image_height
+                    xmax = max(min(xmax, 1.0), 0.0) * image_width
+                    ymax = max(min(ymax, 1.0), 0.0) * image_height
+                    w = xmax - xmin
+                    h = ymax - ymin
+                    bbox = [xmin, ymin, w, h]
+                    dt_res = {
+                        'image_id': image_id,
+                        'category_id': category_id,
+                        'bbox': bbox,
+                        'score': score
+                    }
+                    dts_res.append(dt_res)
+
+        with open("detection_result.json", 'w') as outfile:
+            json.dump(dts_res, outfile)
+        print("start evaluate using coco api")
+        cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
+        cocoDt = cocoGt.loadRes("detection_result.json")
+        cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
+        cocoEval.evaluate()
+        cocoEval.accumulate()
+        cocoEval.summarize()
 
     test()
 
diff --git a/fluid/object_detection/reader.py b/fluid/object_detection/reader.py
index 67af57f1a7..a30ae797b9 100644
--- a/fluid/object_detection/reader.py
+++ b/fluid/object_detection/reader.py
@@ -35,7 +35,7 @@ def __init__(self,
                  apply_distort=True,
                  apply_expand=True,
                  ap_version='11point',
-                 toy=0): 
+                 toy=0):
         self._dataset = dataset
         self._ap_version = ap_version
         self._toy = toy

From a5e427fdc226ca4cfefae5d798e7ba590fc2b676 Mon Sep 17 00:00:00 2001
From: buxingyuan <sefira32@gmail.com>
Date: Thu, 26 Apr 2018 00:09:07 +0800
Subject: [PATCH 38/40] follow comments

---
 fluid/object_detection/README.md              | 26 ++++++--
 fluid/object_detection/eval.py                |  2 -
 .../{eval_cocoMAP.py => eval_coco_map.py}     | 65 ++++++++++---------
 fluid/object_detection/infer.py               |  2 -
 fluid/object_detection/train.py               | 10 +--
 5 files changed, 58 insertions(+), 47 deletions(-)
 rename fluid/object_detection/{eval_cocoMAP.py => eval_coco_map.py} (77%)

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 7d10e8d152..8263c8a03e 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -74,13 +74,31 @@ TBD
 
 ### Evaluate
 
-```python
-env CUDA_VISIBLE_DEVICES=0 python eval.py --dataset='pascalvoc' --model_dir='train_pascal_model/90' --test_list='' --ap_version='11point'
-env CUDA_VISIBLE_DEVICES=0 python eval_cocoMAP.py --dataset='coco2014' --model_dir='train_coco_model/24'
+You can evaluate your trained model with different metrics, such as 11point and integral, on both the PASCAL VOC and COCO datasets. Moreover, we provide eval_coco_map.py, which uses the COCO-specific mAP metric defined by the [COCO committee](http://cocodataset.org/#detections-eval). To use eval_coco_map.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.
+Install the cocoapi:
+```
+# COCOAPI=/path/to/clone/cocoapi
+git clone https://github.com/cocodataset/cocoapi.git $COCOAPI
+cd $COCOAPI/PythonAPI
+# Install into global site-packages
+make install
+# Alternatively, if you do not have permissions or prefer
+# not to install the COCO API into global site-packages
+python2 setup.py install --user
 ```
-You can evaluate your trained model with different metrics, such as 11point and integral, on both the PASCAL VOC and COCO datasets. Moreover, we provide eval_cocoMAP.py, which uses the COCO-specific mAP metric defined by the [COCO committee](http://cocodataset.org/#detections-eval). To use eval_cocoMAP.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.
 Note we set the default test list to the dataset's test/val list; you can use your own test list by setting the test_list argument.
 
+#### Evaluate on PASCAL VOC
+```python
+env CUDA_VISIBLE_DEVICES=0 python eval.py --dataset='pascalvoc' --model_dir='train_pascal_model/90' --data_dir='data/pascalvoc' --test_list='test.txt' --ap_version='11point'
+```
+
+#### Evaluate on MS-COCO
+```python
+env CUDA_VISIBLE_DEVICES=0 python eval.py --dataset='coco2014' --nms_threshold=0.5 --model_dir='train_coco_model/40' --test_list='annotations/instances_minival2014.json' --ap_version='integral'
+env CUDA_VISIBLE_DEVICES=0 python eval_coco_map.py --dataset='coco2017' --nms_threshold=0.5 --model_dir='train_coco_model/40' --test_list='annotations/instances_minival2017.json'
+```
+
 TBD
 
 ### Infer and Visualize
diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 66716c2d46..56bf96bbfa 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -54,10 +54,8 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
     exe = fluid.Executor(place)
 
     if model_dir:
-
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
-
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     test_reader = paddle.batch(
diff --git a/fluid/object_detection/eval_cocoMAP.py b/fluid/object_detection/eval_coco_map.py
similarity index 77%
rename from fluid/object_detection/eval_cocoMAP.py
rename to fluid/object_detection/eval_coco_map.py
index 03eaccf5a3..739e701035 100644
--- a/fluid/object_detection/eval_cocoMAP.py
+++ b/fluid/object_detection/eval_coco_map.py
@@ -59,10 +59,8 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
     exe = fluid.Executor(place)
 
     if model_dir:
-
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
-
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     test_reader = paddle.batch(
@@ -71,6 +69,38 @@ def if_exist(var):
         place=place,
         feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
 
+    def get_dt_res(nmsed_out_v, data):
+        dts_res = []
+        lod = nmsed_out_v[0].lod()[0]
+        nmsed_out_v = np.array(nmsed_out_v[0])
+        real_batch_size = min(batch_size, len(data))
+        assert (len(lod) == real_batch_size + 1), \
+            "Error in LoD tensor offset dimension. LoD({}) vs. batch_size({})".format(len(lod), batch_size)
+        k = 0
+        for i in range(real_batch_size):
+            dt_num_this_img = lod[i + 1] - lod[i]
+            image_id = int(data[i][4][0])
+            image_width = int(data[i][4][1])
+            image_height = int(data[i][4][2])
+            for j in range(dt_num_this_img):
+                dt = nmsed_out_v[k]
+                k = k + 1
+                category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
+                xmin = max(min(xmin, 1.0), 0.0) * image_width
+                ymin = max(min(ymin, 1.0), 0.0) * image_height
+                xmax = max(min(xmax, 1.0), 0.0) * image_width
+                ymax = max(min(ymax, 1.0), 0.0) * image_height
+                w = xmax - xmin
+                h = ymax - ymin
+                bbox = [xmin, ymin, w, h]
+                dt_res = {
+                    'image_id': image_id,
+                    'category_id': category_id,
+                    'bbox': bbox,
+                    'score': score
+                }
+                dts_res.append(dt_res)
+        return dts_res
     def test():
         dts_res = []
 
@@ -81,36 +111,7 @@ def test():
                                   return_numpy=False)
             if batch_id % 20 == 0:
                 print("Batch {0}".format(batch_id))
-
-            lod = nmsed_out_v[0].lod()[0]
-            nmsed_out_v = np.array(nmsed_out_v[0])
-            real_batch_size = min(batch_size, len(data))
-            assert (len(lod) == real_batch_size + 1), \
-            "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
-            k = 0
-            for i in range(real_batch_size):
-                dt_num_this_img = lod[i + 1] - lod[i]
-                image_id = int(data[i][4][0])
-                image_width = int(data[i][4][1])
-                image_height = int(data[i][4][2])
-                for j in range(dt_num_this_img):
-                    dt = nmsed_out_v[k]
-                    k = k + 1
-                    category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
-                    xmin = max(min(xmin, 1.0), 0.0) * image_width
-                    ymin = max(min(ymin, 1.0), 0.0) * image_height
-                    xmax = max(min(xmax, 1.0), 0.0) * image_width
-                    ymax = max(min(ymax, 1.0), 0.0) * image_height
-                    w = xmax - xmin
-                    h = ymax - ymin
-                    bbox = [xmin, ymin, w, h]
-                    dt_res = {
-                        'image_id': image_id,
-                        'category_id': category_id,
-                        'bbox': bbox,
-                        'score': score
-                    }
-                    dts_res.append(dt_res)
+            dts_res += get_dt_res(nmsed_out_v, data)
 
         with open("detection_result.json", 'w') as outfile:
             json.dump(dts_res, outfile)
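The get_dt_res helper introduced above walks the LoD offsets of the NMS output to regroup detections per image and convert normalized corner boxes into COCO-style pixel xywh boxes. The following self-contained sketch replays that arithmetic on toy data; the arrays and image sizes are made up for illustration and are not model output.

```python
import numpy as np

# Toy stand-in for nmsed_out_v[0]: rows are [label, score, xmin, ymin, xmax, ymax]
# with coordinates normalized to [0, 1]. Values are illustrative only.
nmsed_out = np.array([
    [1, 0.9, 0.10, 0.20, 0.50, 0.60],  # detection 1, image 0
    [3, 0.7, 0.05, 0.05, 0.40, 0.30],  # detection 2, image 0
    [2, 0.8, 0.30, 0.10, 0.90, 0.80],  # detection 1, image 1
])
# LoD offsets: image 0 owns rows [0, 2), image 1 owns rows [2, 3).
lod = [0, 2, 3]
image_sizes = [(640, 480), (320, 240)]  # (width, height) per image, illustrative

dts_res = []
for i in range(len(lod) - 1):
    width, height = image_sizes[i]
    for k in range(lod[i], lod[i + 1]):
        label, score, xmin, ymin, xmax, ymax = nmsed_out[k].tolist()
        # Clip normalized coordinates, scale to pixels, convert to COCO xywh.
        xmin = max(min(xmin, 1.0), 0.0) * width
        ymin = max(min(ymin, 1.0), 0.0) * height
        xmax = max(min(xmax, 1.0), 0.0) * width
        ymax = max(min(ymax, 1.0), 0.0) * height
        dts_res.append({
            'image_id': i,  # the real code reads this from the reader's image info
            'category_id': int(label),
            'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
            'score': score,
        })

print(dts_res)
```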
diff --git a/fluid/object_detection/infer.py b/fluid/object_detection/infer.py
index b27aa5e688..1650cfb67f 100644
--- a/fluid/object_detection/infer.py
+++ b/fluid/object_detection/infer.py
@@ -45,10 +45,8 @@ def infer(args, data_args, image_path, model_dir):
     exe = fluid.Executor(place)
 
     if model_dir:
-
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
-
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
 
     infer_reader = reader.infer(data_args, image_path)
diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py
index 7ccce8405f..ded1e9121a 100644
--- a/fluid/object_detection/train.py
+++ b/fluid/object_detection/train.py
@@ -16,7 +16,7 @@
 # yapf: disable
 add_arg('learning_rate',    float, 0.001,     "Learning rate.")
 add_arg('batch_size',       int,   32,        "Minibatch size.")
-add_arg('num_passes',       int,   25,        "Epoch number.")
+add_arg('num_passes',       int,   120,       "Epoch number.")
 add_arg('use_gpu',          bool,  True,      "Whether use GPU.")
 add_arg('dataset',          str,   'pascalvoc', "coco2014, coco2017, and pascalvoc.")
 add_arg('model_save_dir',   str,   'model',     "The path to save model.")
@@ -116,10 +116,8 @@ def parallel_do(args,
     exe.run(fluid.default_startup_program())
 
     if pretrained_model:
-
         def if_exist(var):
             return os.path.exists(os.path.join(pretrained_model, var.name))
-
         fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
 
     train_reader = paddle.batch(
@@ -137,7 +135,7 @@ def test(pass_id):
             test_map = exe.run(test_program,
                                feed=feeder.feed(data),
                                fetch_list=[accum_map])
-        print("Test {0}, map {1}".format(pass_id, test_map[0]))
+        print("Pass {0}, test map {1}".format(pass_id, test_map[0]))
 
     for pass_id in range(num_passes):
         start_time = time.time()
@@ -239,10 +237,8 @@ def parallel_exe(args,
     exe.run(fluid.default_startup_program())
 
     if pretrained_model:
-
         def if_exist(var):
             return os.path.exists(os.path.join(pretrained_model, var.name))
-
         fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
 
     train_exe = fluid.ParallelExecutor(
@@ -276,7 +272,7 @@ def test(pass_id, best_map):
         if test_map[0] > best_map:
             best_map = test_map[0]
             save_model('best_model')
-        print("Pass {0}, map {1}".format(pass_id, test_map[0]))
+        print("Pass {0}, test map {1}".format(pass_id, test_map[0]))
 
     for pass_id in range(num_passes):
         start_time = time.time()
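The blank-line removals in eval.py, eval_coco_map.py, infer.py and train.py above all surround the same checkpoint-loading idiom. As a rough standalone sketch of that idiom (the directory name below is a placeholder), selectively loading only the variables that have a saved file looks like this:

```python
import os
import paddle.fluid as fluid

def load_existing_vars(exe, model_dir):
    """Load only the variables that have a matching file in model_dir.

    Mirrors the if_exist/load_vars idiom used by eval.py, infer.py and
    train.py above; model_dir is a placeholder path.
    """
    def if_exist(var):
        return os.path.exists(os.path.join(model_dir, var.name))
    fluid.io.load_vars(exe, model_dir, predicate=if_exist)

# Usage (sketch): executor setup as in the scripts above.
# place = fluid.CUDAPlace(0)
# exe = fluid.Executor(place)
# load_existing_vars(exe, 'train_coco_model/40')
```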

From ba0a42f4ce725516271b4a76844877ac6aa1eb62 Mon Sep 17 00:00:00 2001
From: buxingyuan <sefira32@gmail.com>
Date: Thu, 26 Apr 2018 00:18:19 +0800
Subject: [PATCH 39/40] follow yapf

---
 fluid/object_detection/README.md        | 2 +-
 fluid/object_detection/eval.py          | 4 ++--
 fluid/object_detection/eval_coco_map.py | 4 ++--
 fluid/object_detection/infer.py         | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/fluid/object_detection/README.md b/fluid/object_detection/README.md
index 8263c8a03e..9ef05eae9e 100644
--- a/fluid/object_detection/README.md
+++ b/fluid/object_detection/README.md
@@ -74,7 +74,7 @@ TBD
 
 ### Evaluate
 
-You can evaluate your trained model with different metrics, such as 11point and integral, on both the PASCAL VOC and COCO datasets. Moreover, we provide eval_coco_map.py, which uses the COCO-specific mAP metric defined by the [COCO committee](http://cocodataset.org/#detections-eval). To use eval_coco_map.py, the [cocoapi](https://github.com/cocodataset/cocoapi) is needed. 
+You can evaluate your trained model with different metrics, such as 11point and integral, on both the PASCAL VOC and COCO datasets. Moreover, we provide eval_coco_map.py, which uses the COCO-specific mAP metric defined by the [COCO committee](http://cocodataset.org/#detections-eval). To use eval_coco_map.py, the [cocoapi](https://github.com/cocodataset/cocoapi) is needed.
 Install the cocoapi:
 ```
 # COCOAPI=/path/to/clone/cocoapi
diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 56bf96bbfa..79c872bf18 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -52,12 +52,12 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
 
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-
+# yapf: disable
     if model_dir:
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
-
+# yapf: enable
     test_reader = paddle.batch(
         reader.test(data_args, test_list), batch_size=batch_size)
     feeder = fluid.DataFeeder(
diff --git a/fluid/object_detection/eval_coco_map.py b/fluid/object_detection/eval_coco_map.py
index 739e701035..2eccb10964 100644
--- a/fluid/object_detection/eval_coco_map.py
+++ b/fluid/object_detection/eval_coco_map.py
@@ -57,12 +57,12 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
 
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-
+# yapf: disable
     if model_dir:
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
-
+# yapf: enable
     test_reader = paddle.batch(
         reader.test(data_args, test_list), batch_size=batch_size)
     feeder = fluid.DataFeeder(
diff --git a/fluid/object_detection/infer.py b/fluid/object_detection/infer.py
index 1650cfb67f..c1d754f670 100644
--- a/fluid/object_detection/infer.py
+++ b/fluid/object_detection/infer.py
@@ -43,12 +43,12 @@ def infer(args, data_args, image_path, model_dir):
 
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-
+# yapf: disable
     if model_dir:
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
-
+# yapf: enable
     infer_reader = reader.infer(data_args, image_path)
     feeder = fluid.DataFeeder(place=place, feed_list=[image])
 

From b016ed6b4a7bfc74b4a3f6120ac1905b250f0bad Mon Sep 17 00:00:00 2001
From: buxingyuan <sefira32@gmail.com>
Date: Thu, 26 Apr 2018 00:35:45 +0800
Subject: [PATCH 40/40] follow yapf

---
 fluid/object_detection/eval.py          | 4 ++--
 fluid/object_detection/eval_coco_map.py | 4 ++--
 fluid/object_detection/infer.py         | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/fluid/object_detection/eval.py b/fluid/object_detection/eval.py
index 79c872bf18..12783d83e4 100644
--- a/fluid/object_detection/eval.py
+++ b/fluid/object_detection/eval.py
@@ -52,12 +52,12 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
 
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-# yapf: disable
+    # yapf: disable
     if model_dir:
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
-# yapf: enable
+    # yapf: enable
     test_reader = paddle.batch(
         reader.test(data_args, test_list), batch_size=batch_size)
     feeder = fluid.DataFeeder(
diff --git a/fluid/object_detection/eval_coco_map.py b/fluid/object_detection/eval_coco_map.py
index 2eccb10964..741badf65d 100644
--- a/fluid/object_detection/eval_coco_map.py
+++ b/fluid/object_detection/eval_coco_map.py
@@ -57,12 +57,12 @@ def eval(args, data_args, test_list, batch_size, model_dir=None):
 
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-# yapf: disable
+    # yapf: disable
     if model_dir:
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
-# yapf: enable
+    # yapf: enable
     test_reader = paddle.batch(
         reader.test(data_args, test_list), batch_size=batch_size)
     feeder = fluid.DataFeeder(
diff --git a/fluid/object_detection/infer.py b/fluid/object_detection/infer.py
index c1d754f670..698a89ad0b 100644
--- a/fluid/object_detection/infer.py
+++ b/fluid/object_detection/infer.py
@@ -43,12 +43,12 @@ def infer(args, data_args, image_path, model_dir):
 
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-# yapf: disable
+    # yapf: disable
     if model_dir:
         def if_exist(var):
             return os.path.exists(os.path.join(model_dir, var.name))
         fluid.io.load_vars(exe, model_dir, predicate=if_exist)
-# yapf: enable
+    # yapf: enable
     infer_reader = reader.infer(data_args, image_path)
     feeder = fluid.DataFeeder(place=place, feed_list=[image])